code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1)
---|---|---|---|---|
'''simple docstring'''
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    """Draw the triangle spanned by the three vertices, then recurse on the corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
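# Illustrative usage (depth value chosen here, matching the argument check above):
#   python fractals.py 5
# draws a Sierpinski triangle of recursion depth 5.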
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
| 41 | 1 |
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Calculate the net present value of a list of yearly cash flows, discounted at the given rate."""
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''')
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
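# Illustrative check (values chosen here, not from the original tests):
# present_value(0.1, [-100, 50, 60]) returns -4.96,
# i.e. -100 + 50/1.1 + 60/1.21 rounded to two decimal places.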
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. contronet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of force, charge1, charge2 or distance is given as 0."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')
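# Illustrative check (values chosen here): coulombs_law(0, 3, 5, 2000)
# returns {"force": 33705.0}, since 8.988e9 * 15 / 2000**2 == 33705.0.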
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t.
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
| 41 | 1 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t.
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two discrete signals via a circulant-matrix method."""
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
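# Illustrative check (computed here, not from the original tests): with the default
# signals above, CircularConvolution().circular_convolution() evaluates to [10, 10, 6, 14].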
if __name__ == "__main__":
    doctest.testmod()
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the smallest non-negative n with n % n1 == r1 and n % n2 == r2 (n1, n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
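# Illustrative check (values chosen here): chinese_remainder_theorem(5, 1, 7, 3) and
# chinese_remainder_theorem2(5, 1, 7, 3) both return 31, since 31 % 5 == 1 and 31 % 7 == 3.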
if __name__ == "__main__":
    from doctest import testmod
    testmod(name='''chinese_remainder_theorem''', verbose=True)
    testmod(name='''chinese_remainder_theorem2''', verbose=True)
    testmod(name='''invert_modulo''', verbose=True)
    testmod(name='''extended_euclid''', verbose=True)
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; holds the generated video frames."""
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'switch_transformers'
SCREAMING_SNAKE_CASE : Union[str, Any] = ['past_key_values']
SCREAMING_SNAKE_CASE : Tuple = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Optional[Any] ,lowercase__ : Tuple=3_2_1_2_8 ,lowercase__ : str=7_6_8 ,lowercase__ : Any=6_4 ,lowercase__ : List[str]=2_0_4_8 ,lowercase__ : Union[str, Any]=6_4 ,lowercase__ : List[str]=1_2 ,lowercase__ : List[Any]=3 ,lowercase__ : str=1_2 ,lowercase__ : Optional[int]=3 ,lowercase__ : Union[str, Any]=1_2 ,lowercase__ : Any=8 ,lowercase__ : List[str]=False ,lowercase__ : Any=0.0_1 ,lowercase__ : str="float32" ,lowercase__ : List[str]=False ,lowercase__ : int=3_2 ,lowercase__ : str=1_2_8 ,lowercase__ : List[str]=0.1 ,lowercase__ : Dict=1e-6 ,lowercase__ : Dict=0.0_0_1 ,lowercase__ : Union[str, Any]=0.0_0_1 ,lowercase__ : Optional[Any]=1.0 ,lowercase__ : int="relu" ,lowercase__ : Any=True ,lowercase__ : int=False ,lowercase__ : List[Any]=True ,lowercase__ : Optional[Any]=0 ,lowercase__ : Union[str, Any]=1 ,**lowercase__ : List[Any] ,):
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_sparse_encoder_layers
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__lowercase = self.num_layers // self.num_sparse_encoder_layers
else:
__lowercase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__lowercase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__lowercase = self.num_decoder_layers # HACK: this will create 0 sparse layers
__lowercase = num_heads
__lowercase = num_experts
__lowercase = expert_capacity
__lowercase = router_bias
__lowercase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
__lowercase = router_dtype
__lowercase = router_ignore_padding_tokens
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = add_router_probs
__lowercase = router_z_loss_coef
__lowercase = router_aux_loss_coef
__lowercase = self.feed_forward_proj.split('''-''' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == '''gated'''
if len(lowercase__ ) > 1 and act_info[0] != "gated" or len(lowercase__ ) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__lowercase = '''gelu_new'''
super().__init__(
pad_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,**lowercase__ ,)
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse a flat list of unknown CLI arguments into a dict of (key, value) pairs."""
    return {key.lstrip('''-'''): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
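# For example, ["--num_proc", "4", "--cache_dir", "/tmp"] becomes
# {"num_proc": "4", "cache_dir": "/tmp"}.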
def main():
    """Entry point for the datasets CLI."""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''')
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    main()
| 41 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Print the shortest distance from the source to every vertex."""
    print(F"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(F"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one more relaxation pass would still shorten a path, i.e. a negative cycle exists."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from src to every vertex, or raise if a negative cycle exists."""
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')
    return distance
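# Illustrative check (graph chosen here): for graph = [{"src": 0, "dst": 1, "weight": 5}],
# bellman_ford(graph, 2, 1, 0) returns [0.0, 5.0].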
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 41 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w ( h , w , scale_factor=8 ):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
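# Hedged example of the rounding above (8 is the movq scale factor assumed by the
# pipeline below): get_new_h_w(768, 768, scale_factor=8) returns (96, 96), i.e.
# ceil(side / scale_factor**2) latent cells re-scaled by scale_factor, which the
# movq decoder later upsamples back to 768 pixels.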
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,lowercase__ : MultilingualCLIP ,lowercase__ : XLMRobertaTokenizer ,lowercase__ : UNetaDConditionModel ,lowercase__ : Union[DDIMScheduler, DDPMScheduler] ,lowercase__ : VQModel ,):
super().__init__()
self.register_modules(
text_encoder=lowercase__ ,tokenizer=lowercase__ ,unet=lowercase__ ,scheduler=lowercase__ ,movq=lowercase__ ,)
__lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Dict ):
if latents is None:
__lowercase = randn_tensor(lowercase__ ,generator=lowercase__ ,device=lowercase__ ,dtype=lowercase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowercase = latents.to(lowercase__ )
__lowercase = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Tuple=None ,):
__lowercase = len(lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else 1
# get prompt text embeddings
__lowercase = self.tokenizer(
lowercase__ ,padding='''max_length''' ,truncation=lowercase__ ,max_length=7_7 ,return_attention_mask=lowercase__ ,add_special_tokens=lowercase__ ,return_tensors='''pt''' ,)
__lowercase = text_inputs.input_ids
__lowercase = self.tokenizer(lowercase__ ,padding='''longest''' ,return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase__ ,lowercase__ ):
__lowercase = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowercase = text_input_ids.to(lowercase__ )
__lowercase = text_inputs.attention_mask.to(lowercase__ )
__lowercase , __lowercase = self.text_encoder(
input_ids=lowercase__ ,attention_mask=lowercase__ )
__lowercase = prompt_embeds.repeat_interleave(lowercase__ ,dim=0 )
__lowercase = text_encoder_hidden_states.repeat_interleave(lowercase__ ,dim=0 )
__lowercase = text_mask.repeat_interleave(lowercase__ ,dim=0 )
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = [''''''] * batch_size
elif type(lowercase__ ) is not type(lowercase__ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase__ )} !="
F" {type(lowercase__ )}." )
elif isinstance(lowercase__ ,lowercase__ ):
__lowercase = [negative_prompt]
elif batch_size != len(lowercase__ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase__ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
__lowercase = negative_prompt
__lowercase = self.tokenizer(
lowercase__ ,padding='''max_length''' ,max_length=7_7 ,truncation=lowercase__ ,return_attention_mask=lowercase__ ,add_special_tokens=lowercase__ ,return_tensors='''pt''' ,)
__lowercase = uncond_input.input_ids.to(lowercase__ )
__lowercase = uncond_input.attention_mask.to(lowercase__ )
__lowercase , __lowercase = self.text_encoder(
input_ids=lowercase__ ,attention_mask=lowercase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = negative_prompt_embeds.shape[1]
__lowercase = negative_prompt_embeds.repeat(1 ,lowercase__ )
__lowercase = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,lowercase__ )
__lowercase = uncond_text_encoder_hidden_states.shape[1]
__lowercase = uncond_text_encoder_hidden_states.repeat(1 ,lowercase__ ,1 )
__lowercase = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,lowercase__ ,-1 )
__lowercase = uncond_text_mask.repeat_interleave(lowercase__ ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowercase = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowercase = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__lowercase = torch.device(F"cuda:{gpu_id}" )
__lowercase = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Union[str, Any]=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' ,'''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
__lowercase = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' ,silence_dtype_warnings=lowercase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowercase = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowercase , __lowercase = cpu_offload_with_hook(lowercase__ ,lowercase__ ,prev_module_hook=lowercase__ )
if self.safety_checker is not None:
__lowercase , __lowercase = cpu_offload_with_hook(self.safety_checker ,lowercase__ ,prev_module_hook=lowercase__ )
# We'll offload the last model manually.
__lowercase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self : Dict ):
if not hasattr(self.unet ,'''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ ,'''_hf_hook''' )
and hasattr(module._hf_hook ,'''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__ )
def __call__( self : Union[str, Any] ,lowercase__ : Union[str, List[str]] ,lowercase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowercase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowercase__ : Optional[Union[str, List[str]]] = None ,lowercase__ : int = 5_1_2 ,lowercase__ : int = 5_1_2 ,lowercase__ : int = 1_0_0 ,lowercase__ : float = 4.0 ,lowercase__ : int = 1 ,lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowercase__ : Optional[torch.FloatTensor] = None ,lowercase__ : Optional[str] = "pil" ,lowercase__ : bool = True ,):
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = 1
elif isinstance(lowercase__ ,lowercase__ ):
__lowercase = len(lowercase__ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}" )
__lowercase = self._execution_device
__lowercase = batch_size * num_images_per_prompt
__lowercase = guidance_scale > 1.0
__lowercase , __lowercase , __lowercase = self._encode_prompt(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = torch.cat(lowercase__ ,dim=0 )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = torch.cat(lowercase__ ,dim=0 )
if do_classifier_free_guidance:
__lowercase = image_embeds.repeat_interleave(lowercase__ ,dim=0 )
__lowercase = negative_image_embeds.repeat_interleave(lowercase__ ,dim=0 )
__lowercase = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=lowercase__ )
self.scheduler.set_timesteps(lowercase__ ,device=lowercase__ )
__lowercase = self.scheduler.timesteps
__lowercase = self.unet.config.in_channels
__lowercase , __lowercase = get_new_h_w(lowercase__ ,lowercase__ ,self.movq_scale_factor )
# create initial latent
__lowercase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,lowercase__ ,lowercase__ ,lowercase__ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
__lowercase = self.unet(
sample=lowercase__ ,timestep=lowercase__ ,encoder_hidden_states=lowercase__ ,added_cond_kwargs=lowercase__ ,return_dict=lowercase__ ,)[0]
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.split(latents.shape[1] ,dim=1 )
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase , __lowercase = variance_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowercase = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase , __lowercase = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(
lowercase__ ,lowercase__ ,lowercase__ ,generator=lowercase__ ,).prev_sample
# post-processing
__lowercase = self.movq.decode(lowercase__ ,force_not_quantize=lowercase__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowercase = image * 0.5 + 0.5
__lowercase = image.clamp(0 ,1 )
__lowercase = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _A ( tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / '''foo.lock''' ) )
    lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.0_1
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def _A ( tmpdir ):
    """simple docstring"""
    filename = '''a''' * 1000 + '''.lock'''
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
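# Hedged standalone sketch of the behaviour exercised above (hypothetical lock
# path; FileLock also accepts a default timeout at construction time):
# with FileLock('''/tmp/example.lock''', timeout=1):
#     ...  # critical section; a competing acquire raises Timeout after ~1s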
| 41 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = 'distilbert'
SCREAMING_SNAKE_CASE : Any = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : Union[str, Any] ,lowercase__ : List[Any]=3_0_5_2_2 ,lowercase__ : Optional[int]=5_1_2 ,lowercase__ : Optional[int]=False ,lowercase__ : Optional[Any]=6 ,lowercase__ : Optional[int]=1_2 ,lowercase__ : List[Any]=7_6_8 ,lowercase__ : Any=4 * 7_6_8 ,lowercase__ : Tuple=0.1 ,lowercase__ : Tuple=0.1 ,lowercase__ : List[str]="gelu" ,lowercase__ : List[Any]=0.0_2 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.2 ,lowercase__ : List[str]=0 ,**lowercase__ : Tuple ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = sinusoidal_pos_embds
__lowercase = n_layers
__lowercase = n_heads
__lowercase = dim
__lowercase = hidden_dim
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation
__lowercase = initializer_range
__lowercase = qa_dropout
__lowercase = seq_classif_dropout
super().__init__(**lowercase__ ,pad_token_id=lowercase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : int ):
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 1 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''')
def _A ( A__ ):
"""simple docstring"""
__lowercase = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore ( key ):
    """simple docstring"""
    def _inner(obj ):
        return key(obj ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects ( objects , key=None ):
    """simple docstring"""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
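# Hedged illustration of the three buckets above (constants, then classes, then
# functions, each sorted case-insensitively with underscores ignored):
# sort_objects(["load_model", "CONFIG_MAP", "ModelConfig"])
# -> ["CONFIG_MAP", "ModelConfig", "load_model"]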
def _A ( A__ ):
"""simple docstring"""
def _replace(A__ ):
__lowercase = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
__lowercase = import_statement.split('''\n''' )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowercase = 2 if lines[1].strip() == '''[''' else 1
__lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__lowercase = sort_objects(A__ , key=lambda A__ : x[1] )
__lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
__lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
__lowercase = _re_bracket_content.sub(_replace , A__ )
return import_statement
def _A ( A__ , A__=True ):
"""simple docstring"""
with open(A__ , '''r''' ) as f:
__lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowercase = split_code_in_indented_blocks(
A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(A__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowercase = main_blocks[block_idx]
__lowercase = block.split('''\n''' )
# Get to the start of the imports.
__lowercase = 0
while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowercase = len(A__ )
else:
line_idx += 1
if line_idx >= len(A__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowercase = '''\n'''.join(block_lines[line_idx:-1] )
__lowercase = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
__lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None]
__lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowercase = 0
__lowercase = []
for i in range(len(A__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(A__ )
count += 1
# And we put our main block back together with its first and last line.
__lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(A__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(A__ ) )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
__lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ )
if result:
__lowercase = [os.path.join(A__ , '''__init__.py''' )]
if len(A__ ) > 0:
raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def _A ( ):
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : str ,lowercase__ : Dict ):
__lowercase = parent
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return {}
def _A ( ):
"""simple docstring"""
__lowercase = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
__lowercase = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = MarkupLMFeatureExtractor if is_bsa_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = MarkupLMFeatureExtractionTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# Initialize feature_extractor
__lowercase = self.feature_extraction_class()
# Test not batched input
__lowercase = get_html_strings()[0]
__lowercase = feature_extractor(lowercase__ )
# fmt: off
__lowercase = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
__lowercase = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes ,lowercase__ )
self.assertEqual(encoding.xpaths ,lowercase__ )
# Test batched
__lowercase = get_html_strings()
__lowercase = feature_extractor(lowercase__ )
# fmt: off
__lowercase = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
__lowercase = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) ,2 )
self.assertEqual(len(encoding.xpaths ) ,2 )
self.assertEqual(encoding.nodes ,lowercase__ )
self.assertEqual(encoding.xpaths ,lowercase__ )
| 41 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_ ( state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk ( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_aa=False ):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is a mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
args = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 1 |
'''simple docstring'''
def _A ( numa , numb ):
    """simple docstring"""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
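    # Hedged sanity checks of the XOR sign-bit trick above: in two's complement,
    # numa ^ numb is negative exactly when the sign bits of the operands differ.
    assert _A(1, -1) is True
    assert _A(-10, -10) is False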
| 41 |
'''simple docstring'''
import os
from math import log10
def solution ( data_file = "base_exp.txt" ):
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
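    # Hedged aside on the comparison used above: x * log10(a) orders a**x values
    # without evaluating them, e.g. 2**10 = 1024 < 3**7 = 2187 and, consistently,
    # 10 * log10(2) ~= 3.01 < 7 * log10(3) ~= 3.34.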
| 41 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'time_series_transformer'
SCREAMING_SNAKE_CASE : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Union[str, Any] ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[int] = None ,lowercase__ : str = "student_t" ,lowercase__ : str = "nll" ,lowercase__ : int = 1 ,lowercase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] ,lowercase__ : Optional[Union[str, bool]] = "mean" ,lowercase__ : int = 0 ,lowercase__ : int = 0 ,lowercase__ : int = 0 ,lowercase__ : int = 0 ,lowercase__ : Optional[List[int]] = None ,lowercase__ : Optional[List[int]] = None ,lowercase__ : int = 3_2 ,lowercase__ : int = 3_2 ,lowercase__ : int = 2 ,lowercase__ : int = 2 ,lowercase__ : int = 2 ,lowercase__ : int = 2 ,lowercase__ : bool = True ,lowercase__ : str = "gelu" ,lowercase__ : int = 6_4 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : int = 1_0_0 ,lowercase__ : float = 0.0_2 ,lowercase__ : Any=True ,**lowercase__ : List[str] ,):
# time series specific configuration
__lowercase = prediction_length
__lowercase = context_length or prediction_length
__lowercase = distribution_output
__lowercase = loss
__lowercase = input_size
__lowercase = num_time_features
__lowercase = lags_sequence
__lowercase = scaling
__lowercase = num_dynamic_real_features
__lowercase = num_static_real_features
__lowercase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowercase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__lowercase = cardinality
else:
__lowercase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowercase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__lowercase = embedding_dimension
else:
__lowercase = [min(5_0 ,(cat + 1) // 2 ) for cat in self.cardinality]
__lowercase = num_parallel_samples
# Transformer architecture configuration
__lowercase = input_size * len(lowercase__ ) + self._number_of_features
__lowercase = d_model
__lowercase = encoder_attention_heads
__lowercase = decoder_attention_heads
__lowercase = encoder_ffn_dim
__lowercase = decoder_ffn_dim
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = activation_function
__lowercase = init_std
__lowercase = use_cache
super().__init__(is_encoder_decoder=lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
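    # Hedged arithmetic check of the property above with the constructor defaults
    # (all optional feature counts 0, input_size=1, cardinality=[0] so the derived
    # embedding dimension is [0]): 0 + 0 + 0 + 0 + 1 * 2 = 2, and the encoder input
    # width computed in __init__ becomes 1 * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.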
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
| 41 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCAmelCase__ = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
def __init__( self : str ,lowercase__ : Dict ,lowercase__ : int="<s>" ,lowercase__ : List[Any]="</s>" ,lowercase__ : Any="</s>" ,lowercase__ : Union[str, Any]="<s>" ,lowercase__ : str="<unk>" ,lowercase__ : Optional[int]="<pad>" ,lowercase__ : Dict="<mask>" ,lowercase__ : Optional[Dict[str, Any]] = None ,**lowercase__ : Tuple ,):
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,cls_token=lowercase__ ,pad_token=lowercase__ ,mask_token=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase__ ,)
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
__lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowercase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowercase = 1
__lowercase = len(self.sp_model ) + self.fairseq_offset
__lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
__lowercase = self.__dict__.copy()
__lowercase = None
__lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] ,lowercase__ : Tuple ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ )) + [1]
return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ):
return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any] ):
        __lowercase = ''''''.join(lowercase__ ).replace('''▁''' ,''' ''' ).strip()  # '▁' is the SentencePiece word-boundary marker
return out_string
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
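# A minimal, self-contained sketch of the fairseq/SentencePiece id alignment the
# tokenizer above mimics: spm reserves ids 0-2 for <unk>/<s>/</s>, while fairseq
# puts <s>/<pad>/</s>/<unk> at 0-3, so every ordinary spm piece shifts by an
# offset of 1. The toy vocabulary below is hypothetical, chosen only to make the
# offset visible.
spm_pieces = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4, "\u2581de": 5}
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:  # special tokens keep their fairseq ids
        return fairseq_tokens_to_ids[token]
    spm_id = spm_pieces.get(token, 0)   # 0 means "unknown" to SentencePiece
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert token_to_id(",") == 4        # spm id 3 + offset 1, matching the table above
assert token_to_id("<pad>") == 1    # special token, fairseq id
assert token_to_id("missing") == 3  # unknown pieces map to fairseq <unk>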
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b )
__lowercase = a // b
return (y, x - k * y)
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
def _A ( A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
if b < 0:
__lowercase = (b % n + n) % n
return b
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
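# A worked example of the routines above with readable names (the helpers below
# are local restatements of the same textbook algorithms, not dataset code):
# find n with n % 5 == 1 and n % 7 == 3 via the extended Euclidean algorithm.
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    x, y = extended_euclid(b, a % b)
    return (y, x - (a // b) * y)

def chinese_remainder(r1: int, n1: int, r2: int, n2: int) -> int:
    x, y = extended_euclid(n1, n2)  # n1*x + n2*y == 1 for coprime moduli
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2   # satisfies both congruences
    return (n % m + m) % m          # normalize into [0, m)

result = chinese_remainder(1, 5, 3, 7)
assert result == 31 and result % 5 == 1 and result % 7 == 3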
| 41 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
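# A simplified stand-in for the mechanism under test, using only the stdlib: the
# real patch_submodule also wraps intermediate modules in _PatchedModuleObj so
# aliased references (like renamed_os above) see the patch too. The core idea:
import os
from contextlib import contextmanager

@contextmanager
def patch_attribute(obj, name, replacement):
    original = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield
    finally:
        setattr(obj, name, original)  # restore on exit, as the asserts above expect

original_join = os.path.join
with patch_attribute(os.path, "join", lambda *parts: "mocked"):
    assert os.path.join("a", "b") == "mocked"
assert os.path.join is original_join  # everything back to normal after the patch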
| 41 | 1 |
'''simple docstring'''
def _A ( A__ = 1000 ):
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
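# A torch-free toy of the ModelTester pattern used above: one object builds the
# shared (config, inputs) fixture, and each create_and_check_* method consumes
# it. Names below are hypothetical; only the structure mirrors the real tester.
from dataclasses import dataclass

@dataclass
class ToyConfig:
    batch_size: int = 13
    seq_length: int = 7

class ToyModelTester:
    def __init__(self, config: ToyConfig):
        self.config = config

    def prepare_config_and_inputs(self):
        inputs = [[0] * self.config.seq_length for _ in range(self.config.batch_size)]
        return self.config, inputs

    def create_and_check_model(self, config, inputs):
        # a real tester would run model(**inputs) and assert on output shapes
        assert len(inputs) == config.batch_size
        assert len(inputs[0]) == config.seq_length

tester = ToyModelTester(ToyConfig())
tester.create_and_check_model(*tester.prepare_config_and_inputs())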
| 41 | 1 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _A ( A__ , A__=False ):
"""simple docstring"""
__lowercase = OmegaConf.load(A__ )
if display:
print(yaml.dump(OmegaConf.to_container(A__ ) ) )
return config
def _A ( A__ , A__=None , A__=None ):
"""simple docstring"""
if conf_path is None:
__lowercase = '''./model_checkpoints/vqgan_only.yaml'''
__lowercase = load_config(A__ , display=A__ )
__lowercase = VQModel(**config.model.params )
if ckpt_path is None:
__lowercase = '''./model_checkpoints/vqgan_only.pt'''
__lowercase = torch.load(A__ , map_location=A__ )
if ".ckpt" in ckpt_path:
__lowercase = sd['''state_dict''']
model.load_state_dict(A__ , strict=A__ )
model.to(A__ )
del sd
return model
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase , __lowercase = model.encode(A__ )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__lowercase = model.decode(A__ )
return xrec
def _A ( A__ , A__=False ):
"""simple docstring"""
__lowercase , __lowercase = string.rsplit('''.''' , 1 )
if reload:
__lowercase = importlib.import_module(A__ )
importlib.reload(A__ )
return getattr(importlib.import_module(A__ , package=A__ ) , cls )
def _A ( A__ ):
"""simple docstring"""
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def _A ( A__ , A__ , A__=True , A__=True ):
"""simple docstring"""
__lowercase = instantiate_from_config(A__ )
if sd is not None:
model.load_state_dict(A__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
if ckpt:
__lowercase = torch.load(A__ , map_location='''cpu''' )
__lowercase = pl_sd['''global_step''']
print(F"loaded model from global step {global_step}." )
else:
__lowercase = {'''state_dict''': None}
__lowercase = None
__lowercase = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=A__ , eval_mode=A__ )['''model''']
return model, global_step
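# A small, runnable restatement of the config-driven instantiation used above
# (get_obj_from_str / instantiate_from_config); the config dict below is
# hypothetical and uses a stdlib class so nothing needs to be downloaded.
import importlib

def get_class_from_str(path: str):
    module_name, cls_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), cls_name)

config = {"target": "fractions.Fraction", "params": {"numerator": 3, "denominator": 4}}
obj = get_class_from_str(config["target"])(**config.get("params", {}))
assert float(obj) == 0.75  # both the class and its kwargs came from the config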
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
super().__init__(lowercase__ ,lowercase__ )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
        __lowercase = ''', '''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 41 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
__lowercase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__lowercase = math.floor(val / multiple ) * multiple
if x < min_val:
__lowercase = math.ceil(val / multiple ) * multiple
return x
__lowercase = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
__lowercase , __lowercase = get_image_size(A__ )
__lowercase , __lowercase = output_size
# determine new height and width
__lowercase = output_height / input_height
__lowercase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__lowercase = scale_width
else:
# fit height
__lowercase = scale_height
__lowercase = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
__lowercase = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values']
def __init__( self : Tuple ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowercase__ : bool = False ,lowercase__ : int = 1 ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,**lowercase__ : Dict ,):
super().__init__(**lowercase__ )
__lowercase = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
__lowercase = get_size_dict(lowercase__ )
__lowercase = do_resize
__lowercase = size
__lowercase = keep_aspect_ratio
__lowercase = ensure_multiple_of
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : bool = False ,lowercase__ : int = 1 ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Tuple ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
__lowercase = get_resize_output_image_size(
lowercase__ ,output_size=(size['''height'''], size['''width''']) ,keep_aspect_ratio=lowercase__ ,multiple=lowercase__ ,)
return resize(lowercase__ ,size=lowercase__ ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[int, float] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Optional[Any] ,):
return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Tuple ,):
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : ImageInput ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : PILImageResampling = None ,lowercase__ : bool = None ,lowercase__ : float = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : str ,):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(lowercase__ )
__lowercase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__lowercase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
__lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ,lowercase__ : List[Tuple] = None ):
__lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowercase__ ):
__lowercase = target_sizes.numpy()
__lowercase = []
for idx in range(len(lowercase__ ) ):
__lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=lowercase__ )
__lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase__ )
else:
__lowercase = logits.argmax(dim=1 )
__lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
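# The resize arithmetic above, isolated as pure math (no PIL/torch) so the
# keep_aspect_ratio + ensure_multiple_of interaction is easy to check; the
# max_val branch of constraint_to_multiple_of is omitted for brevity.
import math

def constraint_to_multiple_of(val: float, multiple: int, min_val: int = 0) -> int:
    x = round(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

def resize_output_size(in_h, in_w, out_h, out_w, multiple=32):
    scale_h, scale_w = out_h / in_h, out_w / in_w
    # "scale as little as possible": keep the factor closer to 1
    scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return (constraint_to_multiple_of(scale * in_h, multiple),
            constraint_to_multiple_of(scale * in_w, multiple))

# A 480x640 image targeted at 384x384: height's factor (0.8) is gentler than
# width's (0.6), so it wins, and both sides snap to multiples of 32.
assert resize_output_size(480, 640, 384, 384) == (384, 512)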
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(lowercase__ )
def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,):
if "text_queries" in kwargs:
__lowercase = kwargs.pop('''text_queries''' )
if isinstance(lowercase__ ,(str, Image.Image) ):
__lowercase = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__lowercase = image
__lowercase = super().__call__(lowercase__ ,**lowercase__ )
return results
def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ):
__lowercase = {}
if "threshold" in kwargs:
__lowercase = kwargs['''threshold''']
if "top_k" in kwargs:
__lowercase = kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
__lowercase = load_image(inputs['''image'''] )
__lowercase = inputs['''candidate_labels''']
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = candidate_labels.split(''',''' )
__lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(lowercase__ ):
__lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework )
__lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework )
yield {
"is_last": i == len(lowercase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
__lowercase = model_inputs.pop('''target_size''' )
__lowercase = model_inputs.pop('''candidate_label''' )
__lowercase = model_inputs.pop('''is_last''' )
__lowercase = self.model(**lowercase__ )
__lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ):
__lowercase = []
for model_output in model_outputs:
__lowercase = model_output['''candidate_label''']
__lowercase = BaseModelOutput(lowercase__ )
__lowercase = self.image_processor.post_process_object_detection(
outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
__lowercase = outputs['''scores'''][index].item()
__lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] )
__lowercase = {'''score''': score, '''label''': label, '''box''': box}
results.append(lowercase__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ )
if top_k:
__lowercase = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
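# A usage sketch for the pipeline above. The checkpoint and image URL are the
# ones commonly used in transformers documentation for this task; running this
# needs network access and downloads model weights.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    # each entry: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
    print(pred["label"], round(pred["score"], 3), pred["box"])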
| 41 | 1 |
'''simple docstring'''
lowerCAmelCase__ = range(2, 20 + 1)
lowerCAmelCase__ = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = sum(a_i[j] for j in range(A__ , len(A__ ) ) )
__lowercase = sum(a_i[j] * base[j] for j in range(min(len(A__ ) , A__ ) ) )
__lowercase , __lowercase = 0, 0
__lowercase = n - i
__lowercase = memo.get(A__ )
if sub_memo is not None:
__lowercase = sub_memo.get(A__ )
if jumps is not None and len(A__ ) > 0:
# find and make the largest jump without going over
__lowercase = -1
for _k in range(len(A__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__lowercase = _k
break
if max_jump >= 0:
__lowercase , __lowercase , __lowercase = jumps[max_jump]
# since the difference between jumps is cached, add c
__lowercase = diff + c
for j in range(min(A__ , len(A__ ) ) ):
__lowercase , __lowercase = divmod(A__ , 10 )
if new_c > 0:
add(A__ , A__ , A__ )
else:
__lowercase = []
else:
__lowercase = {c: []}
__lowercase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__lowercase , __lowercase = next_term(A__ , k - 1 , i + dn , A__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__lowercase , __lowercase = compute(A__ , A__ , i + dn , A__ )
diff += _diff
dn += terms_jumped
__lowercase = sub_memo[c]
# keep jumps sorted by # of terms skipped
__lowercase = 0
while j < len(A__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A__ , (diff, dn, k) )
return (diff, dn)
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(A__ ):
a_i.extend([0 for _ in range(k - len(A__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__lowercase = i
__lowercase , __lowercase , __lowercase = 0, 0, 0
for j in range(len(A__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__lowercase = ds_c + ds_b
diff += addend
__lowercase = 0
for j in range(A__ ):
__lowercase = a_i[j] + addend
__lowercase , __lowercase = divmod(A__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A__ , A__ , A__ )
return diff, i - start_i
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ , len(A__ ) ):
__lowercase = digits[j] + addend
if s >= 10:
__lowercase , __lowercase = divmod(A__ , 10 )
__lowercase = addend // 10 + quotient
else:
__lowercase = s
__lowercase = addend // 10
if addend == 0:
break
while addend > 0:
__lowercase , __lowercase = divmod(A__ , 10 )
digits.append(A__ )
def _A ( A__ = 10**15 ):
"""simple docstring"""
__lowercase = [1]
__lowercase = 1
__lowercase = 0
while True:
__lowercase , __lowercase = next_term(A__ , 20 , i + dn , A__ )
dn += terms_jumped
if dn == n - i:
break
__lowercase = 0
for j in range(len(A__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
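# A naive reference for the sequence the memoized solver above appears to
# accelerate: a(1) = 1 and a(i+1) = a(i) + digitsum(a(i)). The jump cache
# exists because this O(n) loop cannot reach n = 10**15.
def naive_solution(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a

assert [naive_solution(i) for i in range(1, 8)] == [1, 2, 4, 8, 16, 23, 28]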
| 41 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli'
SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
SCREAMING_SNAKE_CASE : Any = 'text_classifier'
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']]
SCREAMING_SNAKE_CASE : List[str] = ['text']
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setup()
__lowercase = self.model.config
__lowercase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__lowercase = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ):
__lowercase = labels
return self.pre_processor(
[text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = outputs.logits
__lowercase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
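# A toy of the NLI trick the tool above uses: every candidate label becomes the
# hypothesis "This example is {label}", and the label whose entailment score is
# highest wins. Scores below are made up; the real ones are MNLI entailment logits.
def pick_label(entailment_scores: dict) -> str:
    return max(entailment_scores, key=entailment_scores.get)

hypotheses = [f"This example is {label}" for label in ("positive", "negative")]
assert hypotheses[0] == "This example is positive"
assert pick_label({"positive": 2.3, "negative": -1.1}) == "positive"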
| 41 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
sd_pipe.set_scheduler('''sample_euler''' )
__lowercase = '''A painting of a squirrel eating a burger'''
__lowercase = torch.manual_seed(0 )
__lowercase = sd_pipe([prompt] ,generator=lowercase__ ,guidance_scale=9.0 ,num_inference_steps=2_0 ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
sd_pipe.set_scheduler('''sample_euler''' )
__lowercase = '''A painting of a squirrel eating a burger'''
__lowercase = torch.manual_seed(0 )
__lowercase = sd_pipe([prompt] ,generator=lowercase__ ,guidance_scale=9.0 ,num_inference_steps=2_0 ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
__lowercase = '''A painting of a squirrel eating a burger'''
__lowercase = torch.manual_seed(0 )
__lowercase = sd_pipe(
[prompt] ,generator=lowercase__ ,guidance_scale=7.5 ,num_inference_steps=1_5 ,output_type='''np''' ,use_karras_sigmas=lowercase__ ,)
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 41 |
'''simple docstring'''
from collections.abc import Callable
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ):
# Stores actual heap items.
__lowercase = []
# Stores indexes of each item for supporting updates and deletion.
__lowercase = {}
# Stores current size of heap.
__lowercase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
__lowercase = key or (lambda lowercase__ : x)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
return int((i - 1) / 2 ) if i > 0 else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
__lowercase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ):
__lowercase , __lowercase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__lowercase , __lowercase = self.arr[j], self.arr[i]
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
return self.arr[i][1] < self.arr[j][1]
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._left(lowercase__ )
__lowercase = self._right(lowercase__ )
__lowercase = i
if left is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = left
if right is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = right
return valid_parent
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._parent(lowercase__ )
while parent is not None and not self._cmp(lowercase__ ,lowercase__ ):
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = parent, self._parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = self._get_valid_parent(lowercase__ )
while valid_parent != index:
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
__lowercase = [item, self.key(lowercase__ )]
        # Make sure heap is right in both up and down directions.
        # Ideally only one of them will make any change.
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
del self.pos_map[item]
__lowercase = self.arr[self.size - 1]
__lowercase = index
self.size -= 1
        # Make sure heap is right in both up and down directions. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase__ )] )
else:
__lowercase = [item, self.key(lowercase__ )]
__lowercase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.arr[0] if self.size else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
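# A miniature of the pos_map idea above: keeping an item -> index map next to
# the heap array makes "where is this item?" O(1), which is what turns update
# and delete into O(log n). Standalone toy; only the swap discipline matters.
arr = []       # heap slots as [item, score] pairs
pos_map = {}   # item -> index into arr

def swap(i: int, j: int) -> None:
    pos_map[arr[i][0]], pos_map[arr[j][0]] = j, i  # fix the map first
    arr[i], arr[j] = arr[j], arr[i]                # then swap the slots

for item, score in (("a", 5), ("b", 3)):
    pos_map[item] = len(arr)
    arr.append([item, score])

swap(0, 1)
assert arr[pos_map["a"]][0] == "a" and arr[pos_map["b"]][0] == "b"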
| 41 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
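# The lazy-import pattern above, in miniature: attribute access triggers the
# real import, so importing the package stays cheap. Simplified; the real
# _LazyModule also handles __dir__, module specs, and TYPE_CHECKING stubs.
import importlib

class MiniLazyModule:
    def __init__(self, import_structure: dict):
        # invert {module: [names]} into {name: module}
        self._where = {name: mod for mod, names in import_structure.items() for name in names}

    def __getattr__(self, name):
        module = importlib.import_module(self._where[name])
        return getattr(module, name)

lazy = MiniLazyModule({"math": ["sqrt"], "json": ["dumps"]})
assert lazy.sqrt(9) == 3.0  # math resolved only when sqrt was first touched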
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
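# For reference outside the test harness above, a minimal custom callback is a
# TrainerCallback subclass that overrides only the hooks it needs. Sketch: the
# hook signature matches the transformers API; the print body is illustrative.
#
#     from transformers import TrainerCallback
#
#     class StepLogger(TrainerCallback):
#         def on_step_end(self, args, state, control, **kwargs):
#             if state.global_step % args.logging_steps == 0:
#                 print(f"finished step {state.global_step}")
#
#     trainer = Trainer(model=model, args=training_args, callbacks=[StepLogger])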
| 41 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
__lowercase = 0
__lowercase = len(A__ )
for i in range(n - 1 ):
for j in range(i + 1 , A__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _A ( A__ ):
"""simple docstring"""
if len(A__ ) <= 1:
return arr, 0
__lowercase = len(A__ ) // 2
__lowercase = arr[0:mid]
__lowercase = arr[mid:]
__lowercase , __lowercase = count_inversions_recursive(A__ )
__lowercase , __lowercase = count_inversions_recursive(A__ )
__lowercase , __lowercase = _count_cross_inversions(A__ , A__ )
__lowercase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = __lowercase = __lowercase = 0
while i < len(A__ ) and j < len(A__ ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P).
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(A__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(A__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
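# Worked example for the cross-inversion count above: with p = [2, 5] and
# q = [1, 3], p[0]=2 > q[0]=1 contributes len(p) - 0 = 2 inversions
# ((2, 1) and (5, 1)); later p[1]=5 > q[1]=3 contributes len(p) - 1 = 1 more,
# giving 3 cross inversions in total and the merged list [1, 2, 3, 5].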
def _A ( ):
"""simple docstring"""
__lowercase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__lowercase = count_inversions_bf(A__ )
__lowercase , __lowercase = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , A__ )
# testing an array with zero inversions (a sorted array)
arr_a.sort()
__lowercase = count_inversions_bf(A__ )
__lowercase , __lowercase = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , A__ )
# an empty list should also have zero inversions
__lowercase = []
__lowercase = count_inversions_bf(A__ )
__lowercase , __lowercase = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , A__ )
if __name__ == "__main__":
main()
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
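# Shape sketch for the conditioning embedding above (illustrative, assuming the
# default block_out_channels (16, 32, 96, 256) and Flax's NHWC layout): three
# stride-2 convolutions downsample a (B, 512, 512, 3) conditioning image 8x to
# (B, 64, 64, 256), and the zero-initialized conv_out then projects it to the
# UNet's first block width, so the condition starts out as a no-op at init.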
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
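# Hedged usage sketch for the module above (its public name is obfuscated here;
# in diffusers the equivalent class is FlaxControlNetModel, and the weight
# initializer is the init-weights method defined above):
#
#     import jax
#     controlnet = FlaxControlNetModel(sample_size=32)
#     params = controlnet.init_weights(jax.random.PRNGKey(0))
#     out = controlnet.apply({"params": params}, sample, timesteps,
#                            encoder_hidden_states, controlnet_cond)
#     # out.down_block_res_samples and out.mid_block_res_sample feed the UNet.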
| 41 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = XLNetTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = XLNetTokenizerFast
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = XLNetTokenizer(lowercase__ ,keep_accents=lowercase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''<s>'''
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) ,lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<unk>''' )
self.assertEqual(vocab_keys[1] ,'''<s>''' )
self.assertEqual(vocab_keys[-1] ,'''<eod>''' )
self.assertEqual(len(lowercase__ ) ,1_0_0_6 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = XLNetTokenizer(lowercase__ ,keep_accents=lowercase__ )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
__lowercase = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = XLNetTokenizer(lowercase__ ,do_lower_case=lowercase__ )
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''▁he''', '''ll''', '''o'''] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = XLNetTokenizer(lowercase__ ,do_lower_case=lowercase__ )
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
__lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=lowercase__ )
__lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=lowercase__ )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowercase__ ,lowercase__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
# fmt: off
__lowercase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ ,model_name='''xlnet-base-cased''' ,revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' ,)
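# Note on the expectations above: XLNet places its special tokens at the *end*
# of the sequence -- `[4, 3]` is `[<sep>, <cls>]` -- so a single sequence is
# encoded as `tokens + [sep, cls]` and a pair as `tokens_a + [sep] + tokens_b
# + [sep, cls]`, which is exactly what the two asserts in the builders test
# check.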
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
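# Shape sketch for torch_extract_patches above: a (3, 32, 64) image tensor with
# 16x16 patches unfolds into 2 rows x 4 columns and comes back as a
# (1, 2, 4, 16 * 16 * 3) tensor, i.e. (1, rows, columns,
# patch_height * patch_width * channels).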
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t. the resulting number of patches (rows * columns) does not exceed max_patches
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are flattened patches, not images.''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
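# Hedged usage sketch (the class name is obfuscated above; in transformers the
# equivalent processor is Pix2StructImageProcessor):
#
#     processor = Pix2StructImageProcessor(max_patches=2048)
#     encoded = processor(images=image, return_tensors="pt")
#     encoded["flattened_patches"].shape  # (1, 2048, 2 + 16 * 16 * 3)
#     encoded["attention_mask"].shape     # (1, 2048), zero over padded patches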
| 41 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
lowerCAmelCase__ = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
lowerCAmelCase__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE : List[int] = []
SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any]="<s>" ,lowercase__ : str="</s>" ,lowercase__ : Union[str, Any]="</s>" ,lowercase__ : List[Any]="<s>" ,lowercase__ : List[str]="<unk>" ,lowercase__ : Dict="<pad>" ,lowercase__ : int="<mask>" ,lowercase__ : Tuple=None ,lowercase__ : List[str]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Optional[Dict[str, Any]] = None ,lowercase__ : Any=None ,lowercase__ : Any=False ,**lowercase__ : Optional[int] ,):
# Mask token behaves like a normal word, i.e. it includes the space before it
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowercase = legacy_behaviour
super().__init__(
bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,cls_token=lowercase__ ,pad_token=lowercase__ ,mask_token=lowercase__ ,tokenizer_file=lowercase__ ,src_lang=lowercase__ ,tgt_lang=lowercase__ ,additional_special_tokens=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,legacy_behaviour=lowercase__ ,**lowercase__ ,)
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
__lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__lowercase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowercase = 1
__lowercase = len(self.sp_model )
__lowercase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
__lowercase = {v: k for k, v in self.lang_code_to_id.items()}
__lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowercase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__lowercase = src_lang if src_lang is not None else '''eng_Latn'''
__lowercase = self.lang_code_to_id[self._src_lang]
__lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[int] ):
__lowercase = self.__dict__.copy()
__lowercase = None
__lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,lowercase__ : List[str] ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
__lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
__lowercase = [1] * len(self.prefix_tokens )
__lowercase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : str ,lowercase__ : str ,lowercase__ : Optional[str] ,lowercase__ : Optional[str] ,**lowercase__ : str ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__lowercase = src_lang
__lowercase = self(lowercase__ ,add_special_tokens=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
__lowercase = self.convert_tokens_to_ids(lowercase__ )
__lowercase = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ):
return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Tuple ):
__lowercase = ''''''.join(lowercase__ ).replace(lowercase__ ,''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : str = "eng_Latn" ,lowercase__ : Optional[List[str]] = None ,lowercase__ : str = "fra_Latn" ,**lowercase__ : Union[str, Any] ,):
__lowercase = src_lang
__lowercase = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ ,lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__lowercase = []
__lowercase = [self.eos_token_id, self.cur_lang_code]
else:
__lowercase = [self.cur_lang_code]
__lowercase = [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ):
__lowercase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__lowercase = []
__lowercase = [self.eos_token_id, self.cur_lang_code]
else:
__lowercase = [self.cur_lang_code]
__lowercase = [self.eos_token_id]
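# Hedged usage sketch (in transformers the tokenizer above is NllbTokenizer):
#
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     batch = tok("Hello world", return_tensors="pt")
#     # at generation time, force the decoder to start in the target language:
#     forced_bos = tok.lang_code_to_id["fra_Latn"]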
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ ,2 ) for i in final_signal]
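# Worked example for the circular convolution above: with first_signal
# [2, 1, 2, -1] and second_signal [1, 2, 3, 4] the result is
# [10.0, 10.0, 6.0, 14.0]; e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10.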
if __name__ == "__main__":
doctest.testmod()
| 41 | 1 |
'''simple docstring'''
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
if index == number_of_items:
return 0
__lowercase = 0
__lowercase = 0
__lowercase = knapsack(A__ , A__ , A__ , A__ , index + 1 )
if weights[index] <= max_weight:
__lowercase = values[index] + knapsack(
A__ , A__ , A__ , max_weight - weights[index] , index + 1 )
return max(A__ , A__ )
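# Worked example for the recursion above: values (60, 100, 120), weights
# (10, 20, 30) and max_weight 50 yield 220 (take the second and third items).
# The plain recursion explores O(2^n) branches; memoizing on
# (index, max_weight) would bring this down to O(n * max_weight).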
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''PoolFormerFeatureExtractor''']
lowerCAmelCase__ = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """simple docstring"""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
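    # Example for summarization (illustrative, mirroring the MT usage above):
    # python run_eval.py t5-base $DATA_DIR/test.source $save_dir/test_summaries.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 32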
| 41 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'detr'
SCREAMING_SNAKE_CASE : List[str] = ['past_key_values']
SCREAMING_SNAKE_CASE : Any = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int ,lowercase__ : Dict=True ,lowercase__ : Optional[Any]=None ,lowercase__ : str=3 ,lowercase__ : Optional[int]=1_0_0 ,lowercase__ : Optional[int]=6 ,lowercase__ : Optional[Any]=2_0_4_8 ,lowercase__ : Any=8 ,lowercase__ : int=6 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Any=8 ,lowercase__ : List[str]=0.0 ,lowercase__ : Union[str, Any]=0.0 ,lowercase__ : Union[str, Any]=True ,lowercase__ : str="relu" ,lowercase__ : int=2_5_6 ,lowercase__ : int=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : Tuple=0.0 ,lowercase__ : int=0.0_2 ,lowercase__ : Dict=1.0 ,lowercase__ : str=False ,lowercase__ : Union[str, Any]="sine" ,lowercase__ : List[str]="resnet50" ,lowercase__ : Dict=True ,lowercase__ : List[Any]=False ,lowercase__ : Union[str, Any]=1 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : Tuple=2 ,lowercase__ : str=1 ,lowercase__ : List[Any]=1 ,lowercase__ : Dict=5 ,lowercase__ : Optional[Any]=2 ,lowercase__ : int=0.1 ,**lowercase__ : Dict ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__lowercase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowercase__ ,lowercase__ ):
__lowercase = backbone_config.get('''model_type''' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowercase__ )
# set timm attributes to None
__lowercase , __lowercase , __lowercase = None, None, None
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
super().__init__(is_encoder_decoder=lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.d_model
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int ,lowercase__ : PretrainedConfig ,**lowercase__ : Any ):
return cls(backbone_config=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return 1_2
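# Minimal usage sketch (illustrative; the two classes above correspond to
# DetrConfig and DetrOnnxConfig in the original library, here with mangled names):
#   config = DetrConfig(num_queries=50, d_model=128)
#   onnx_config = DetrOnnxConfig(config)
#   list(onnx_config.inputs)         # -> ["pixel_values", "pixel_mask"]
#   onnx_config.atol_for_validation  # -> 1e-5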
| 41 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    """simple docstring"""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    """simple docstring"""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """simple docstring"""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
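    # Illustrative check (assumed example, not from the original file): two
    # edges 0 -> 1 (w=4) and 1 -> 2 (w=-2) give distances [0.0, 4.0, 2.0]
    # from source 0.
    assert bellman_ford(
        [{"src": 0, "dst": 1, "weight": 4}, {"src": 1, "dst": 2, "weight": -2}], 3, 2, 0
    ) == [0.0, 4.0, 2.0]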
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 41 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'bit'
SCREAMING_SNAKE_CASE : str = ['preactivation', 'bottleneck']
SCREAMING_SNAKE_CASE : List[Any] = ['SAME', 'VALID']
def __init__( self : int ,lowercase__ : Optional[int]=3 ,lowercase__ : List[str]=6_4 ,lowercase__ : Union[str, Any]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] ,lowercase__ : str=[3, 4, 6, 3] ,lowercase__ : Optional[Any]="preactivation" ,lowercase__ : int="relu" ,lowercase__ : Optional[int]=None ,lowercase__ : List[str]=3_2 ,lowercase__ : Optional[int]=0.0 ,lowercase__ : int=False ,lowercase__ : Union[str, Any]=3_2 ,lowercase__ : str=1 ,lowercase__ : Tuple=None ,lowercase__ : Tuple=None ,**lowercase__ : Tuple ,):
super().__init__(**lowercase__ )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__lowercase = global_padding.upper()
else:
raise ValueError(F"Padding strategy {global_padding} not supported" )
__lowercase = num_channels
__lowercase = embedding_size
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = layer_type
__lowercase = hidden_act
__lowercase = global_padding
__lowercase = num_groups
__lowercase = drop_path_rate
__lowercase = embedding_dynamic_padding
__lowercase = output_stride
__lowercase = width_factor
__lowercase = ['''stem'''] + [F"stage{idx}" for idx in range(1 ,len(lowercase__ ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowercase__ ,out_indices=lowercase__ ,stage_names=self.stage_names )
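# Illustrative note (assumed behaviour of get_aligned_output_features_output_indices):
# with the default depths [3, 4, 6, 3] the stage names are
# ["stem", "stage1", "stage2", "stage3", "stage4"], and out_features=["stage4"]
# would be aligned to out_indices=[4].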
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 41 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : int ):
__lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowercase__ )
return config
def SCREAMING_SNAKE_CASE ( self : Any ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] ,[0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase__ ,beta_end=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
self.check_over_configs(thresholding=lowercase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase__ ,prediction_type=lowercase__ ,sample_max_value=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1e-5
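        # Context (illustrative): for the default "fixed_small" setting these
        # values follow the DDPM posterior variance
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), which is ~0 at
        # t=0 and approaches beta_end (0.02) near t=999.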
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = len(lowercase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(lowercase__ ) ):
# 1. predict noise residual
__lowercase = model(lowercase__ ,lowercase__ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(lowercase__ ,lowercase__ ,lowercase__ ,generator=lowercase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowercase = scheduler_class(**lowercase__ )
__lowercase = len(lowercase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(lowercase__ ) ):
# 1. predict noise residual
__lowercase = model(lowercase__ ,lowercase__ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(lowercase__ ,lowercase__ ,lowercase__ ,generator=lowercase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowercase__ )
__lowercase = scheduler.timesteps
for i, timestep in enumerate(lowercase__ ):
if i == len(lowercase__ ) - 1:
__lowercase = -1
else:
__lowercase = timesteps[i + 1]
__lowercase = scheduler.previous_timestep(lowercase__ )
__lowercase = prev_t.item()
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowercase__ ,msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = [1_0_0, 8_7, 5_0, 1, 0]
__lowercase = len(lowercase__ )
with self.assertRaises(lowercase__ ,msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowercase__ ,timesteps=lowercase__ )
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """simple docstring"""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """simple docstring"""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = AStar(init, goal)
lowerCAmelCase__ = a_star.search()
lowerCAmelCase__ = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = BidirectionalAStar(init, goal)
lowerCAmelCase__ = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
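    # Illustrative note: with HEURISTIC = 1 the Manhattan distance |dx| + |dy|
    # is used; for start (0, 0) and goal (6, 6) above that gives 12, while the
    # Euclidean form sqrt(dx**2 + dy**2) gives ~8.49.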
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError('''args.model_type should be "bert".''')
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCAmelCase__ = state_dict[f'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
lowerCAmelCase__ = state_dict['''cls.predictions.decoder.weight''']
lowerCAmelCase__ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[f'cls.predictions.transform.dense.{w}']
lowerCAmelCase__ = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 41 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
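# Illustrative examples (not in the original): on the line
#     "configuration_bert": ["BertConfig"],
# _re_direct_key captures `configuration_bert`; on the line
#     _import_structure["models.bert"].extend([...])
# _re_indirect_key captures `models.bert`.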
def get_indent(line):
    """simple docstring"""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """simple docstring"""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """simple docstring"""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """simple docstring"""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """simple docstring"""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """simple docstring"""
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'deberta-v2'
def __init__( self : int ,lowercase__ : Optional[Any]=1_2_8_1_0_0 ,lowercase__ : str=1_5_3_6 ,lowercase__ : List[Any]=2_4 ,lowercase__ : Union[str, Any]=2_4 ,lowercase__ : Optional[Any]=6_1_4_4 ,lowercase__ : Union[str, Any]="gelu" ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : Tuple=0.1 ,lowercase__ : Dict=5_1_2 ,lowercase__ : List[str]=0 ,lowercase__ : int=0.0_2 ,lowercase__ : str=1e-7 ,lowercase__ : Any=False ,lowercase__ : Union[str, Any]=-1 ,lowercase__ : Optional[int]=0 ,lowercase__ : List[str]=True ,lowercase__ : List[str]=None ,lowercase__ : Optional[int]=0 ,lowercase__ : str="gelu" ,**lowercase__ : Optional[Any] ,):
super().__init__(**lowercase__ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = relative_attention
__lowercase = max_relative_positions
__lowercase = pad_token_id
__lowercase = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
__lowercase = [x.strip() for x in pos_att_type.lower().split('''|''' )]
__lowercase = pos_att_type
__lowercase = vocab_size
__lowercase = layer_norm_eps
__lowercase = kwargs.get('''pooler_hidden_size''' ,lowercase__ )
__lowercase = pooler_dropout
__lowercase = pooler_hidden_act
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 1_2
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional["TensorType"] = None ,lowercase__ : int = 3 ,lowercase__ : int = 4_0 ,lowercase__ : int = 4_0 ,lowercase__ : "PreTrainedTokenizerBase" = None ,):
__lowercase = super().generate_dummy_inputs(preprocessor=lowercase__ ,framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 1 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 41 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
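# Example invocation (illustrative; the script name is assumed, flags as
# defined by the parser below):
#   python convert_mbart_checkpoint.py model.pt ./mbart_out \
#       --hf_config facebook/mbart-large-cc25 --finetuned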
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 1 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """simple docstring"""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
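# Illustrative note: comparing x * log10(a) avoids evaluating the huge powers
# themselves; e.g. for 2^11 vs 3^7, 11 * log10(2) ~ 3.31 < 7 * log10(3) ~ 3.34,
# so 3^7 is the larger number.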
if __name__ == "__main__":
print(solution())
| 41 | 1 |
'''simple docstring'''
def solution():
    """simple docstring"""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
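# Illustrative check: for the first ten terms, 1^1 + 2^2 + ... + 10^10 =
# 10405071317, so the same truncation would yield "0405071317".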
if __name__ == "__main__":
print(solution())
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
    def inputs(self):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F"past_key_values.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"past_key_values.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs(self):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F"present.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"present.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
| 41 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _A ( ):
"""simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
__lowercase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
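    # The MST is undirected, so each expected edge may legitimately come back
    # from the algorithm in either orientation; accept both below.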
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 41 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
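# Quick sanity check for the reconstructed functions above:
# chinese_remainder_theorem(5, 1, 7, 3) == 31, since 31 % 5 == 1 and 31 % 7 == 3.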
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = '''Muhammad Umer Farooq'''
lowerCAmelCase__ = '''MIT'''
lowerCAmelCase__ = '''1.0.0'''
lowerCAmelCase__ = '''Muhammad Umer Farooq'''
lowerCAmelCase__ = '''[email protected]'''
lowerCAmelCase__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    """simple docstring"""
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is present and is neither empty nor just "#", consider it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """simple docstring"""
    return ".".join(get_sub_domain_name(url).split('''.''')[-2:])
def get_sub_domain_name(url: str) -> str:
    """simple docstring"""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """simple docstring"""
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
try:
        # Open URL
        r = requests.get(url)
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('''[a-zA-Z0-9]+@''' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
lowerCAmelCase__ = emails_from_url('''https://github.com''')
print(f'{len(emails)} emails found:')
print('''\n'''.join(sorted(emails)))
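# Caveat: the pattern "[a-zA-Z0-9]+@" + domain only captures simple local parts;
# addresses containing '.', '_', '+' or '-' before the '@' are missed or truncated.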
| 41 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching, '''os.path.join''', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, '''open''', mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock):
pass
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None) is None
    with patch_submodule(_test_patching, '''len''', mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock):
pass
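# Summary of the behaviour exercised above: patch_submodule(module, "os.path.join", mock)
# rewires every alias of os.path.join reachable from `module` (direct, renamed, or via
# attribute chains) for the duration of the context, then restores the originals on exit.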
| 41 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """simple docstring"""
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin, ConfigMixin):
    """simple docstring"""
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (6_4,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 3_2,
        sample_size: int = 3_2,
        scaling_factor: float = 0.1_8_2_1_5,
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True,)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn,)
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.2_5
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling
    def disable_tiling(self):
        self.enable_tiling(False)
    def enable_slicing(self):
        self.use_slicing = True
    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, '''set_processor'''):
                processors[F"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"{name}.{sub_name}", child, processors)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                F"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, '''set_processor'''):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(F"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"{name}.{sub_name}", child, processor)
        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
@apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
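    # The two helpers below hide tile seams: blend_v cross-fades overlapping tiles
    # along the height axis (dim 2) and blend_h along the width axis (dim 3).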
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
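# Minimal usage sketch for the reconstructed AutoencoderKL (hypothetical shapes):
#   import torch
#   vae = AutoencoderKL()                      # defaults from __init__ above
#   vae.enable_tiling()                        # large inputs route through tiled_encode/tiled_decode
#   img = torch.randn(1, 3, 256, 256)
#   posterior = vae.encode(img).latent_dist
#   recon = vae.decode(posterior.sample()).sample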
| 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_2_8,
        max_relative_position=3_2,  # inferred from the upstream Nezha tester; accepted but not stored below
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
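    # The tester follows the usual transformers pattern: build a tiny NezhaConfig,
    # then each create_and_check_* method instantiates one model head on top of it
    # and asserts the output shapes.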
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=3_7)
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = []
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for i in range(len(A__ ) ):
if board[row][i] == 1:
return False
for i in range(len(A__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(A__ , -1 , -1 ) , range(A__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(A__ , -1 , -1 ) , range(A__ , len(A__ ) ) ):
if board[i][j] == 1:
return False
return True
def _A ( A__ , A__ ):
"""simple docstring"""
if row >= len(A__ ):
solution.append(A__ )
printboard(A__ )
print()
return True
for i in range(len(A__ ) ):
if is_safe(A__ , A__ , A__ ):
__lowercase = 1
solve(A__ , row + 1 )
__lowercase = 0
return False
def _A ( A__ ):
"""simple docstring"""
for i in range(len(A__ ) ):
for j in range(len(A__ ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase__ = 8
lowerCAmelCase__ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """simple docstring"""
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    """simple docstring"""
    def __init__(self):
        super().__init__(None, None)
    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """simple docstring"""
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.7_5):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
    def __repr__(self):
        val_string = ''' ,'''.join(
            F"{item.key}: {item.val}" for item in self._buckets if item)
        return F"HashMap({val_string})"
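# Minimal usage sketch of the HashMap above (open addressing with linear probing
# and tombstone deletion):
#   hm = HashMap()
#   hm["answer"] = 42
#   assert hm["answer"] == 42 and len(hm) == 1
#   del hm["answer"]            # slot becomes a _deleted tombstone, probing continues past it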
| 41 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''', threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object, {
                    '''score''': ANY(float),
                    '''label''': ANY(str),
                    '''box''': {'''xmin''': ANY(int), '''ymin''': ANY(int), '''xmax''': ANY(int), '''ymax''': ANY(int)},
                },)
import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        batch = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object, {
                        '''score''': ANY(float),
                        '''label''': ANY(str),
                        '''box''': {'''xmin''': ANY(int), '''ymin''': ANY(int), '''xmax''': ANY(int), '''ymax''': ANY(int)},
                    },)
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] ,)
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] ,threshold=0.0 ,)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] ,)
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = '''facebook/detr-resnet-50'''
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''')
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] ,)
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] ,)
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
        model_id = '''facebook/detr-resnet-50'''
        object_detector = pipeline('''object-detection''', model=model_id)
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''')
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] ,)
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] ,)
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
threshold = 0.9_9_8_5
model_id = '''facebook/detr-resnet-50'''
object_detector = pipeline('''object-detection''' ,model=model_id )
outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ,threshold=threshold )
self.assertEqual(
nested_simplify(outputs ,decimals=4 ) ,[
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] ,)
@require_torch
@require_pytesseract
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
threshold = 0.9_9_9_3
object_detector = pipeline('''object-detection''' ,model=model_id ,threshold=threshold )
outputs = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(outputs ,decimals=4 ) ,[
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] ,)
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline (ChunkPipeline ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
def __call__( self : List[str] ,image : Union[str, "Image.Image", List[Dict[str, Any]]] ,candidate_labels : Union[str, List[str]] = None ,**kwargs : str ,):
if "text_queries" in kwargs:
candidate_labels = kwargs.pop('''text_queries''' )
if isinstance(image ,(str, Image.Image) ):
inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
inputs = image
results = super().__call__(inputs ,**kwargs )
return results
def _sanitize_parameters( self : int ,**kwargs : List[Any] ):
postprocess_params = {}
if "threshold" in kwargs:
postprocess_params['''threshold'''] = kwargs['''threshold''']
if "top_k" in kwargs:
postprocess_params['''top_k'''] = kwargs['''top_k''']
return {}, {}, postprocess_params
def preprocess( self : Optional[Any] ,inputs : Optional[Any] ):
image = load_image(inputs['''image'''] )
candidate_labels = inputs['''candidate_labels''']
if isinstance(candidate_labels ,str ):
candidate_labels = candidate_labels.split(''',''' )
target_size = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(candidate_labels ):
text_inputs = self.tokenizer(candidate_label ,return_tensors=self.framework )
image_features = self.image_processor(image ,return_tensors=self.framework )
yield {
"is_last": i == len(candidate_labels ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _forward( self : Any ,model_inputs : List[str] ):
target_size = model_inputs.pop('''target_size''' )
candidate_label = model_inputs.pop('''candidate_label''' )
is_last = model_inputs.pop('''is_last''' )
outputs = self.model(**model_inputs )
model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def postprocess( self : Any ,model_outputs : int ,threshold : List[Any]=0.1 ,top_k : List[str]=None ):
results = []
for model_output in model_outputs:
label = model_output['''candidate_label''']
model_output = BaseModelOutput(model_output )
outputs = self.image_processor.post_process_object_detection(
outputs=model_output ,threshold=threshold ,target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
score = outputs['''scores'''][index].item()
box = self._get_bounding_box(outputs['''boxes'''][index][0] )
result = {'''score''': score, '''label''': label, '''box''': box}
results.append(result )
results = sorted(results ,key=lambda x : x["score"] ,reverse=True )
if top_k:
results = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
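# Illustrative usage sketch, not part of the original file. The checkpoint name is an
# assumption; any zero-shot object detection checkpoint (e.g. OWL-ViT) should work:
#
# from transformers import pipeline
# detector = pipeline('''zero-shot-object-detection''' ,model='''google/owlvit-base-patch32''' )
# predictions = detector(
# '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,
# candidate_labels=['''cat''', '''remote control'''] ,
# )
# # each entry looks like {'score': ..., 'label': ..., 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}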
| 41 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowerCAmelCase__ = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path ( model_type , use_small=False ):
"""simple docstring"""
key = model_type
if use_small:
key += "_small"
return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def _download ( repo_id , file_name ):
"""simple docstring"""
os.makedirs(CACHE_DIR , exist_ok=True )
hf_hub_download(repo_id=repo_id , filename=file_name , local_dir=CACHE_DIR )
def _A ( A__ , A__ , A__=False , A__="text" ):
"""simple docstring"""
if model_type == "text":
__lowercase = BarkSemanticModel
__lowercase = BarkSemanticConfig
__lowercase = BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowercase = BarkCoarseModel
__lowercase = BarkCoarseConfig
__lowercase = BarkCoarseGenerationConfig
elif model_type == "fine":
__lowercase = BarkFineModel
__lowercase = BarkFineConfig
__lowercase = BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowercase = F"{model_type}_small" if use_small else model_type
__lowercase = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(A__ ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
__lowercase = torch.load(A__ , map_location=A__ )
# this is a hack
__lowercase = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
__lowercase = model_args['''vocab_size''']
__lowercase = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowercase = model_args.pop('''n_head''' )
__lowercase = model_args.pop('''n_embd''' )
__lowercase = model_args.pop('''n_layer''' )
__lowercase = ConfigClass(**checkpoint['''model_args'''] )
__lowercase = ModelClass(config=A__ )
__lowercase = GenerationConfigClass()
__lowercase = model_generation_config
__lowercase = checkpoint['''model''']
# fixup checkpoint
__lowercase = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(A__ ):
# replace part of the key with corresponding layer name in HF implementation
__lowercase = k[len(A__ ) :]
for old_layer_name in new_layer_name_dict:
__lowercase = new_k.replace(A__ , new_layer_name_dict[old_layer_name] )
__lowercase = state_dict.pop(A__ )
__lowercase = set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowercase = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
__lowercase = set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowercase = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(A__ ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(A__ ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(A__ , strict=A__ )
__lowercase = model.num_parameters(exclude_embeddings=A__ )
__lowercase = checkpoint['''best_val_loss'''].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(A__ , 3 )} loss" )
model.eval()
model.to(A__ )
del checkpoint, state_dict
return model
def _A ( A__ , A__=False , A__="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowercase = '''cpu''' # do conversion on cpu
__lowercase = _get_ckpt_path(A__ , use_small=A__ )
__lowercase = _load_model(A__ , A__ , model_type=A__ , use_small=A__ )
# load bark initial model
__lowercase = _bark_load_model(A__ , '''cpu''' , model_type=A__ , use_small=A__ )
if model_type == "text":
__lowercase = bark_model['''model''']
if model.num_parameters(exclude_embeddings=A__ ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
__lowercase = 5
__lowercase = 10
if model_type in ["text", "coarse"]:
__lowercase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__lowercase = bark_model(A__ )[0]
__lowercase = model(A__ )
# take last logits
__lowercase = output_new_model_total.logits[:, [-1], :]
else:
__lowercase = 3
__lowercase = 8
__lowercase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowercase = model(A__ , A__ )
__lowercase = bark_model(A__ , A__ )
__lowercase = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
def _A ( A__ , A__ , A__ , A__ , A__ , A__ , ):
"""simple docstring"""
__lowercase = os.path.join(A__ , A__ )
__lowercase = BarkSemanticConfig.from_pretrained(os.path.join(A__ , '''config.json''' ) )
__lowercase = BarkCoarseConfig.from_pretrained(os.path.join(A__ , '''config.json''' ) )
__lowercase = BarkFineConfig.from_pretrained(os.path.join(A__ , '''config.json''' ) )
__lowercase = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
__lowercase = BarkSemanticModel.from_pretrained(A__ )
__lowercase = BarkCoarseModel.from_pretrained(A__ )
__lowercase = BarkFineModel.from_pretrained(A__ )
__lowercase = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
__lowercase = BarkConfig.from_sub_model_configs(
A__ , A__ , A__ , A__ )
__lowercase = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__lowercase = BarkModel(A__ )
__lowercase = semantic
__lowercase = coarseAcoustic
__lowercase = fineAcoustic
__lowercase = codec
__lowercase = bark_generation_config
Path(A__ ).mkdir(exist_ok=A__ )
bark.save_pretrained(A__ , repo_id=A__ , push_to_hub=A__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
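# Example invocation (illustrative; the script file name is an assumption):
# python convert_suno_to_hf.py text ./bark-semantic --is_small
# downloads the small semantic ("text") checkpoint if needed, converts it, and saves
# the resulting HF model under ./bark-semantic.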
| 41 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool (PipelineTool ):
"""simple docstring"""
default_checkpoint = 'facebook/bart-large-mnli'
description = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
name = 'text_classifier'
pre_processor_class = AutoTokenizer
model_class = AutoModelForSequenceClassification
inputs = ['text', ['text']]
outputs = ['text']
def setup( self : List[Any] ):
super().setup()
config = self.model.config
self.entailment_id = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def encode( self : int ,text : Dict ,labels : List[Any] ):
self._labels = labels
return self.pre_processor(
[text] * len(labels ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
def decode( self : int ,outputs : int ):
logits = outputs.logits
label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
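# Minimal usage sketch, not part of the original file; PipelineTool instances are
# callable and load their model lazily on first call:
#
# classifier = TextClassificationTool()
# classifier('''This is a super nice API!''' ,labels=['''positive''', '''negative'''] )
# # -> '''positive'''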
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack (list ):
"""simple docstring"""
def __lt__( self : Any ,other : List[Any] ):
return self[-1] < other[-1]
def __eq__( self : Dict ,other : Union[str, Any] ):
return self[-1] == other[-1]
def patience_sort ( collection ):
"""simple docstring"""
stacks = []
# sort into stacks
for element in collection:
new_stack = Stack([element] )
i = bisect_left(stacks , new_stack )
if i != len(stacks ):
stacks[i].append(element )
else:
stacks.append(new_stack )
# use a heap-based merge to merge stack efficiently
collection[:] = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 41 |
'''simple docstring'''
from collections.abc import Callable
class Heap :
"""simple docstring"""
def __init__( self : Optional[int] ,key : Callable | None = None ):
# Stores actual heap items.
self.arr = []
# Stores indexes of each item for supporting updates and deletion.
self.pos_map = {}
# Stores current size of heap.
self.size = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
self.key = key or (lambda x : x)
def _parent( self : Tuple ,i : int ):
return int((i - 1) / 2 ) if i > 0 else None
def _left( self : Optional[int] ,i : int ):
left = int(2 * i + 1 )
return left if 0 < left < self.size else None
def _right( self : Optional[Any] ,i : int ):
right = int(2 * i + 2 )
return right if 0 < right < self.size else None
def _swap( self : List[Any] ,i : int ,j : int ):
self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]
def _cmp( self : Any ,i : int ,j : int ):
return self.arr[i][1] < self.arr[j][1]
def _get_valid_parent( self : int ,i : int ):
left = self._left(i )
right = self._right(i )
valid_parent = i
if left is not None and not self._cmp(left ,valid_parent ):
valid_parent = left
if right is not None and not self._cmp(right ,valid_parent ):
valid_parent = right
return valid_parent
def _heapify_up( self : int ,index : int ):
parent = self._parent(index )
while parent is not None and not self._cmp(index ,parent ):
self._swap(index ,parent )
index , parent = parent, self._parent(parent )
def _heapify_down( self : Optional[int] ,index : int ):
valid_parent = self._get_valid_parent(index )
while valid_parent != index:
self._swap(index ,valid_parent )
index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )
def update_item( self : Any ,item : int ,item_value : int ):
if item not in self.pos_map:
return
index = self.pos_map[item]
self.arr[index] = [item, self.key(item_value )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(index )
self._heapify_down(index )
def delete_item( self : int ,item : int ):
if item not in self.pos_map:
return
index = self.pos_map[item]
del self.pos_map[item]
self.arr[index] = self.arr[self.size - 1]
self.pos_map[self.arr[self.size - 1][0]] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(index )
self._heapify_down(index )
def insert_item( self : List[str] ,item : int ,item_value : int ):
arr_len = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(item_value )] )
else:
self.arr[self.size] = [item, self.key(item_value )]
self.pos_map[item] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def get_top( self : List[Any] ):
return self.arr[0] if self.size else None
def extract_top( self : Union[str, Any] ):
top_item_tuple = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
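# Usage sketch (illustrative, not part of the original file): a max-heap of numbers
# built by negating the key.
#
# heap = Heap(key=lambda x : -x )
# for value in (3, 10, 7):
# heap.insert_item(value ,value )
# heap.get_top() # -> [10, -10]
# heap.extract_top() # -> [10, -10]; afterwards 7 is the new top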
| 41 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput (BaseOutput ):
"""simple docstring"""
down_block_res_samples : jnp.ndarray
mid_block_res_sample : jnp.ndarray
class FlaxControlNetConditioningEmbedding (nn.Module ):
"""simple docstring"""
conditioning_embedding_channels : int
block_out_channels : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
dtype : jnp.dtype = jnp.floataa
def setup( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class FlaxControlNetModel (nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
sample_size : int = 3_2
in_channels : int = 4
down_block_types : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
only_cross_attention : Union[bool, Tuple[bool]] = False
block_out_channels : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
layers_per_block : int = 2
attention_head_dim : Union[int, Tuple[int]] = 8
num_attention_heads : Optional[Union[int, Tuple[int]]] = None
cross_attention_dim : int = 1_2_8_0
dropout : float = 0.0
use_linear_projection : bool = False
dtype : jnp.dtype = jnp.floataa
flip_sin_to_cos : bool = True
freq_shift : int = 0
controlnet_conditioning_channel_order : str = "rgb"
conditioning_embedding_out_channels : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def init_weights( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def setup( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
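# Illustrative initialization sketch (an assumption, mirroring how other Flax models
# in diffusers are driven):
#
# import jax
# controlnet = FlaxControlNetModel(sample_size=3_2 )
# params = controlnet.init_weights(jax.random.PRNGKey(0 ) )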
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback (TrainerCallback ):
"""simple docstring"""
def __init__( self : List[str] ):
self.events = []
def on_init_end( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def on_train_begin( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def on_train_end( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def on_epoch_begin( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def on_epoch_end( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def on_step_begin( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def on_step_end( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def on_evaluate( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def on_predict( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def on_save( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def on_log( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def on_prediction_step( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def setUp( self : List[str] ):
self.output_dir = tempfile.mkdtemp()
def tearDown( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def get_trainer( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def check_callbacks_equality( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def get_expected_events( self : int ,trainer : Union[str, Any] ):
expected_events = ['''on_init_end''', '''on_train_begin''']
step = 0
train_dl_len = len(trainer.get_eval_dataloader() )
evaluation_events = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 41 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def setUp( self : Dict ):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='''utf-8''' ,check=lowercase__ ,)
assert hasattr(self ,'''env''' )
def create_estimator( self : Any ,instance_count : Optional[int] ):
base_job_name = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=base_job_name ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=distribution ,py_version='''py36''' ,)
def save_results_as_csv( self : Optional[int] ,job_name : Optional[int] ):
TrainingJobAnalytics(job_name ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
# create estimator
estimator = self.create_estimator(lowercase__ )
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' ,9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} ,lowercase__ )
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput (BaseOutput ):
"""simple docstring"""
down_block_res_samples : jnp.ndarray
mid_block_res_sample : jnp.ndarray
class FlaxControlNetConditioningEmbedding (nn.Module ):
"""simple docstring"""
conditioning_embedding_channels : int
block_out_channels : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
dtype : jnp.dtype = jnp.floataa
def setup( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class FlaxControlNetModel (nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
sample_size : int = 3_2
in_channels : int = 4
down_block_types : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
only_cross_attention : Union[bool, Tuple[bool]] = False
block_out_channels : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
layers_per_block : int = 2
attention_head_dim : Union[int, Tuple[int]] = 8
num_attention_heads : Optional[Union[int, Tuple[int]]] = None
cross_attention_dim : int = 1_2_8_0
dropout : float = 0.0
use_linear_projection : bool = False
dtype : jnp.dtype = jnp.floataa
flip_sin_to_cos : bool = True
freq_shift : int = 0
controlnet_conditioning_channel_order : str = "rgb"
conditioning_embedding_out_channels : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def init_weights( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def setup( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
| 41 | 1 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env ( env_keys , default ):
"""simple docstring"""
for e in env_keys:
val = int(os.environ.get(e , -1 ) )
if val >= 0:
return val
return default
def parse_flag_from_env ( key , default=False ):
"""simple docstring"""
value = os.environ.get(key , str(default ) )
return strtobool(value ) == 1 # As its name indicates `strtobool` actually returns an int...
def _A ( A__ , A__="no" ):
"""simple docstring"""
__lowercase = os.environ.get(A__ , str(A__ ) )
return value
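# Illustrative behaviour of the helpers above (the function names are restorations of
# the obfuscated originals, so treat them as assumptions):
#
# os.environ['''WORLD_SIZE'''] = '''4'''
# get_int_from_env(['''WORLD_SIZE''', '''SLURM_NTASKS'''] ,1 ) # -> 4
# parse_flag_from_env('''DEBUG''' ) # -> False unless DEBUG is set to a truthy string
# parse_choice_from_env('''MODE''' ,default='''no''' ) # -> the value of MODE, or "no"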
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
lowerCAmelCase__ = logging.get_logger(__name__)
DEFAULT_FONT_PATH = '''ybelkada/fonts'''
def _check_torch_version ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches ( image_tensor , patch_height , patch_width ):
"""simple docstring"""
requires_backends(torch_extract_patches , ['''torch'''] )
_check_torch_version()
image_tensor = image_tensor.unsqueeze(0 )
patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
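# Shape example for the helper above: a (1, 3, 32, 64) image tensor with 16x16 patches
# yields rows = 32 // 16 = 2 and columns = 64 // 16 = 4, so the returned tensor has
# shape (1, 2, 4, 3 * 16 * 16) = (1, 2, 4, 768).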
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
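# e.g. (illustrative) render_text('''What is the total amount?''' ) returns a PIL image
# of the wrapped question rendered in black on a white background.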
def render_header ( image , header , **kwargs ):
"""simple docstring"""
requires_backends(render_header , '''vision''' )
# Convert to PIL image if necessary
image = to_pil_image(image )
header_image = render_text(header , **kwargs )
new_width = max(header_image.width , image.width )
new_height = int(image.height * (new_width / image.width) )
new_header_height = int(header_image.height * (new_width / header_image.width) )
new_image = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
new_image = to_numpy_array(new_image )
if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
        # maximize scale s.t. the resulting rows * columns of patches <= max_patches
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are always flattened patches.''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
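# Hedged sketch of the per-image standardization implemented by the normalize
# method above: the image is centered with its own mean, scaled by its own
# std, and the std is floored at 1/sqrt(num_pixels) so near-constant images do
# not divide by ~0. Names are illustrative only.
def standardize_sketch(image):
    image = image.astype(np.float32)
    mean = float(np.mean(image))
    std = max(float(np.std(image)), 1.0 / math.sqrt(np.prod(image.shape)))
    return (image - mean) / std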
| 41 | 1 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _A ( A__ = 3 ):
"""simple docstring"""
    if isinstance(A__ , str ):
        raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(A__ ) != number_of_qubits:
        raise ValueError('''number of qubits must be an exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
__lowercase = QuantumRegister(A__ , '''qr''' )
__lowercase = ClassicalRegister(A__ , '''cr''' )
__lowercase = QuantumCircuit(A__ , A__ )
__lowercase = number_of_qubits
for i in range(A__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(A__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , A__ , A__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(A__ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(A__ , A__ )
# simulate with 10000 shots
__lowercase = Aer.get_backend('''qasm_simulator''' )
__lowercase = execute(A__ , A__ , shots=10000 )
return job.result().get_counts(A__ )
if __name__ == "__main__":
    print(
        f'Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}'
    )
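# Hedged sanity check: the QFT maps |0...0> to the uniform superposition, so
# the 10000-shot histogram returned above should be roughly flat over all
# 2**n bitstrings (about 1250 counts per outcome for n = 3). The tolerance
# below is illustrative.
def is_roughly_uniform(counts, number_of_qubits, shots=10000, tol=0.05):
    expected = shots / 2**number_of_qubits
    return all(abs(count - expected) <= tol * shots for count in counts.values())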
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
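# Hedged cross-check via the convolution theorem: a circular convolution is
# the inverse FFT of the elementwise product of the FFTs. For the signals
# above this reproduces the matrix-method result up to rounding.
first_signal = [2, 1, 2, -1]
second_signal = [1, 2, 3, 4]
fft_result = np.real(np.fft.ifft(np.fft.fft(first_signal) * np.fft.fft(second_signal)))
print([round(float(value), 2) for value in fft_result])  # [10.0, 10.0, 6.0, 14.0]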
| 41 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCAmelCase__ = '''▁'''
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
def __init__( self : List[str] ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any]="</s>" ,lowercase__ : Union[str, Any]="<unk>" ,lowercase__ : int="<pad>" ,lowercase__ : Dict=1_0_0 ,lowercase__ : Optional[int]=None ,lowercase__ : Optional[Dict[str, Any]] = None ,lowercase__ : Optional[Any]=True ,**lowercase__ : Optional[int] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowercase = [F"<extra_id_{i}>" for i in range(lowercase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowercase = len(set(filter(lambda lowercase__ : bool('''extra_id''' in str(lowercase__ ) ) ,lowercase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
__lowercase = legacy
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase__ ,unk_token=lowercase__ ,pad_token=lowercase__ ,extra_ids=lowercase__ ,additional_special_tokens=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,legacy=lowercase__ ,**lowercase__ ,)
__lowercase = vocab_file
__lowercase = extra_ids
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' ,lowercase__ ,)
return max_model_length
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase__ )) + [1]
return ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return list(
set(filter(lambda lowercase__ : bool(re.search(r'''<extra_id_\d+>''' ,lowercase__ ) ) is not None ,self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return [self._convert_token_to_id(lowercase__ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[int] ):
if len(lowercase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = self._add_eos_if_not_present(lowercase__ )
if token_ids_a is None:
return token_ids_a
else:
__lowercase = self._add_eos_if_not_present(lowercase__ )
return token_ids_a + token_ids_a
def __getstate__( self : List[str] ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : List[Any] ,lowercase__ : List[Any] ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : "TextInput" ,**lowercase__ : Any ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
__lowercase = SPIECE_UNDERLINE + text.replace(lowercase__ ,''' ''' )
return super().tokenize(lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,**lowercase__ : str ):
if not self.legacy:
__lowercase = text.startswith(lowercase__ )
if is_first:
__lowercase = text[1:]
__lowercase = self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(lowercase__ ):
__lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ):
if token.startswith('''<extra_id_''' ):
__lowercase = re.match(r'''<extra_id_(\d+)>''' ,lowercase__ )
__lowercase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Any ):
if index < self.sp_model.get_piece_size():
__lowercase = self.sp_model.IdToPiece(lowercase__ )
else:
__lowercase = F"<extra_id_{self.vocab_size - 1 - index}>"
return token
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Any ):
__lowercase = []
__lowercase = ''''''
__lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
__lowercase = True
__lowercase = []
else:
current_sub_tokens.append(lowercase__ )
__lowercase = False
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
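# Hedged illustration of the <extra_id_N> mapping implemented above: sentinel
# tokens occupy the top of the vocabulary, so <extra_id_0> maps to
# vocab_size - 1, <extra_id_1> to vocab_size - 2, and so on. The 32100-entry
# vocabulary is the usual T5 size but is assumed here purely for illustration.
def extra_id_to_token_id(n, vocab_size):
    return vocab_size - n - 1

assert extra_id_to_token_id(0, 32100) == 32099
assert extra_id_to_token_id(99, 32100) == 32000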
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
    # 1. Validate that current and next vertices are connected by an edge
    if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
if curr_ind == len(A__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(A__ ) ):
if valid_connection(A__ , A__ , A__ , A__ ):
# Insert current vertex into path as next transition
__lowercase = next_ver
# Validate created path
if util_hamilton_cycle(A__ , A__ , curr_ind + 1 ):
return True
# Backtrack
__lowercase = -1
return False
def _A ( A__ , A__ = 0 ):
"""simple docstring"""
__lowercase = [-1] * (len(A__ ) + 1)
# initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if a Hamiltonian cycle is found return the path, otherwise return an empty list
return path if util_hamilton_cycle(A__ , A__ , 1 ) else []
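# Hedged usage sketch, assuming the top-level function above is exposed as
# hamilton_cycle: a 5-vertex adjacency matrix containing a Hamiltonian cycle;
# backtracking finds the closed path [0, 1, 2, 4, 3, 0].
example_graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]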
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 1 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
lowerCAmelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def _A ( A__ ):
"""simple docstring"""
__lowercase = None
# source code of `config_class`
__lowercase = inspect.getsource(A__ )
__lowercase = _re_checkpoint.findall(A__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowercase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowercase = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
__lowercase = ckpt_name
break
return checkpoint
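# Hedged demonstration of the checkpoint regex above on a docstring-style
# markdown link: findall returns (display name, hub URL) tuples.
example_doc = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_re_checkpoint.findall(example_doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]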
def _A ( ):
"""simple docstring"""
__lowercase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowercase = get_checkpoint_from_config_class(A__ )
__lowercase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(A__ )
if len(A__ ) > 0:
__lowercase = '''\n'''.join(sorted(A__ ) )
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A__ ):
print(F"{i}\t\t{d}" )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [float('''inf''' )] * vertex_count
__lowercase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__lowercase = distance[u] + w
__lowercase = check_negative_cycle(A__ , A__ , A__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
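# Hedged usage sketch, assuming the function above is exposed as bellman_ford
# (the __main__ block below calls it by that name): a 3-vertex graph with
# edges 0->1 (w=4), 0->2 (w=5) and 1->2 (w=-2) gives distances [0.0, 4.0, 2.0]
# from vertex 0.
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -2},
]
print(bellman_ford(example_graph, 3, 3, 0))  # [0.0, 4.0, 2.0]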
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 41 | 1 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = CodeGenTokenizer
SCREAMING_SNAKE_CASE : List[Any] = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Dict = {'add_prefix_space': True}
SCREAMING_SNAKE_CASE : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__lowercase = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) )
__lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase = {'''unk_token''': '''<unk>'''}
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def SCREAMING_SNAKE_CASE ( self : str ,**lowercase__ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[Any] ):
__lowercase = '''lower newer'''
__lowercase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
__lowercase = '''lower newer'''
__lowercase = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowercase = tokenizer.tokenize(lowercase__ ,add_prefix_space=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
__lowercase = '''lower newer'''
# Testing tokenization
__lowercase = tokenizer.tokenize(lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing conversion to ids without special tokens
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing conversion to ids with special tokens
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing the unknown token
__lowercase = tokens + [rust_tokenizer.unk_token]
__lowercase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,*lowercase__ : str ,**lowercase__ : Optional[int] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase__ ,tokenizer_r.encode ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Simple input
self.assertRaises(lowercase__ ,tokenizer_r.encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase__ ,tokenizer_r.batch_encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' ,)
# Pair input
self.assertRaises(lowercase__ ,tokenizer_r.encode ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Pair input
self.assertRaises(lowercase__ ,tokenizer_r.encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase__ ,tokenizer_r.batch_encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input looooooooong''', '''This is a simple input''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer(lowercase__ ,padding='''max_length''' ,max_length=3_0 ,return_tensors='''np''' )
__lowercase = tokenizer(lowercase__ ,padding=lowercase__ ,truncate=lowercase__ ,return_tensors='''np''' )
__lowercase = tokenizer(*lowercase__ ,padding='''max_length''' ,max_length=6_0 ,return_tensors='''np''' )
__lowercase = tokenizer(lowercase__ ,padding=lowercase__ ,truncate=lowercase__ ,return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''$$$'''
__lowercase = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowercase__ ,add_bos_token=lowercase__ )
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer(lowercase__ )
__lowercase = tokenizer(lowercase__ )
self.assertEqual(out_s.input_ids[0] ,lowercase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowercase = tokenizer.decode(out_s.input_ids )
__lowercase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowercase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
__lowercase = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
__lowercase = '''\nif len_a > len_b: result = a\nelse: result = b'''
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
__lowercase = tokenizer.decode(lowercase__ ,truncate_before_pattern=lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
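# Hedged sketch of the decode(..., truncate_before_pattern=...) behaviour the
# slow test above exercises: the decoded completion is cut at the earliest
# match of any pattern, dropping trailing comment markers and blank blocks.
def truncate_before_sketch(text, patterns):
    cut = len(text)
    for pattern in patterns:
        match = re.search(pattern, text, flags=re.MULTILINE)
        if match:
            cut = min(cut, match.start())
    return text[:cut]

demo_completion = "\nif len_a > len_b: result = a\nelse: result = b\n\n\n\n#"
print(truncate_before_sketch(demo_completion, ["^#", "\n\n\n"]))
# "\nif len_a > len_b: result = a\nelse: result = b"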
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 41 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _A ( A__ ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A__ )
def _A ( A__ ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
__lowercase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(A__ , id=A__ )
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = 0.0_1
with locka.acquire():
with pytest.raises(A__ ):
__lowercase = time.time()
locka.acquire(A__ )
assert time.time() - _start > timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = '''a''' * 1000 + '''.lock'''
__lowercase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(A__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__lowercase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(A__ ):
locka.acquire(0 )
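# Hedged usage sketch of the API exercised above: acquiring an already-held
# lock with a short timeout raises Timeout instead of blocking forever. The
# lock path is illustrative.
def try_acquire_sketch(lock_path):
    lock = FileLock(lock_path)
    try:
        with lock.acquire(timeout=0.01):
            return True  # critical section ran
    except Timeout:
        return False  # another process holds the lock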
| 41 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
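# A minimal, hypothetical sketch of the lazy-import idea behind _LazyModule:
# attribute access resolves the owning submodule from the import structure and
# imports it on demand (the real _LazyModule also handles dir(), pickling and
# more).
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)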
| 41 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce TensorFlow console logging
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 41 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''')
def _A ( A__ ):
"""simple docstring"""
__lowercase = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _A ( A__ ):
"""simple docstring"""
def _inner(A__ ):
return key(A__ ).lower().replace('''_''' , '''''' )
return _inner
def _A ( A__ , A__=None ):
"""simple docstring"""
    def noop(A__ ):
        return A__
if key is None:
__lowercase = noop
# Constants are all uppercase, they go first.
__lowercase = [obj for obj in objects if key(A__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()]
# Functions begin with a lowercase, they go last.
__lowercase = [obj for obj in objects if not key(A__ )[0].isupper()]
__lowercase = ignore_underscore(A__ )
return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ )
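# Hedged demonstration of the three-bucket ordering above: ALL-CAPS constants
# first, CapWords classes second, lowercase functions last, each bucket sorted
# case-insensitively with underscores ignored.
print(sort_objects(["TOOL_MAPPING", "load_tool", "OpenAiAgent", "Agent"]))
# ['TOOL_MAPPING', 'Agent', 'OpenAiAgent', 'load_tool']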
def _A ( A__ ):
"""simple docstring"""
def _replace(A__ ):
__lowercase = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
__lowercase = import_statement.split('''\n''' )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowercase = 2 if lines[1].strip() == '''[''' else 1
__lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__lowercase = sort_objects(A__ , key=lambda A__ : x[1] )
__lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
__lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
__lowercase = _re_bracket_content.sub(_replace , A__ )
return import_statement
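# Hedged demonstration of the single-line case handled above: only the
# bracketed object list is reordered; the key itself is left untouched.
print(sort_objects_in_import('_import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_bert"]'))
# _import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_bert"]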
def _A ( A__ , A__=True ):
"""simple docstring"""
with open(A__ , '''r''' ) as f:
__lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowercase = split_code_in_indented_blocks(
A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(A__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowercase = main_blocks[block_idx]
__lowercase = block.split('''\n''' )
# Get to the start of the imports.
__lowercase = 0
while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowercase = len(A__ )
else:
line_idx += 1
if line_idx >= len(A__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowercase = '''\n'''.join(block_lines[line_idx:-1] )
__lowercase = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None]
__lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowercase = 0
__lowercase = []
for i in range(len(A__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(A__ )
count += 1
# And we put our main block back together with its first and last line.
__lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(A__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(A__ ) )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
__lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ )
if result:
__lowercase = [os.path.join(A__ , '''__init__.py''' )]
if len(A__ ) > 0:
raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase__ : Dict ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=3_0 ,lowercase__ : List[Any]=4_0_0 ,lowercase__ : List[str]=True ,lowercase__ : Tuple=None ,lowercase__ : Optional[Any]=0.9 ,lowercase__ : List[str]=None ,lowercase__ : str=True ,lowercase__ : Dict=[0.5, 0.5, 0.5] ,lowercase__ : Dict=[0.5, 0.5, 0.5] ,):
__lowercase = size if size is not None else {'''shortest_edge''': 3_0}
__lowercase = crop_size if crop_size is not None else {'''height''': 3_0, '''width''': 3_0}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize_and_center_crop
__lowercase = size
__lowercase = crop_pct
__lowercase = crop_size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
def SCREAMING_SNAKE_CASE ( self : str ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = PoolFormerImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size''' ) )
self.assertTrue(hasattr(lowercase__ ,'''crop_pct''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ ,'''image_std''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 3_0} )
self.assertEqual(image_processor.crop_size ,{'''height''': 3_0, '''width''': 3_0} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def SCREAMING_SNAKE_CASE ( self : int ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
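# Added note: with the tester defaults above (batch_size=7, num_channels=3,
# crop_size 30x30), the batched assertions check for pixel values of shape:
_expected_batched_shape = (7, 3, 30, 30)  # (batch, channels, height, width)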
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values']
def __init__( self : Tuple ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = PIL.Image.BICUBIC ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,**lowercase__ : Optional[Any] ,):
super().__init__(**lowercase__ )
__lowercase = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
__lowercase = get_size_dict(lowercase__ )
__lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : PILImageResampling = PIL.Image.BICUBIC ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
return resize(
lowercase__ ,size=(size['''height'''], size['''width''']) ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Union[str, Any] ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowercase__ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[int, float] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Any ,):
return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ,):
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : ImageInput ,lowercase__ : bool = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : str=None ,lowercase__ : bool = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : bool = None ,lowercase__ : float = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : Union[str, Any] ,):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(lowercase__ )
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=lowercase__ ,size=lowercase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
__lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
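# Added sketch: a small standalone numpy check (not part of the processor
# above) of the rescale -> normalize order used in `preprocess`. Pixels are
# first scaled to [0, 1] and only then shifted by the per-channel mean/std;
# with the IMAGENET_STANDARD values (all 0.5) the result lands in [-1, 1].
import numpy as np

_pixels = np.array([0.0, 127.5, 255.0])
_rescaled = _pixels * (1 / 255)        # -> [0.0, 0.5, 1.0]
_normalized = (_rescaled - 0.5) / 0.5  # -> [-1.0, 0.0, 1.0]
assert np.allclose(_normalized, [-1.0, 0.0, 1.0])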
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_aa''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
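# Added usage note: a hypothetical invocation (script name and paths are
# placeholders, not taken from the source):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned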
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE : str = 'OwlViTImageProcessor'
SCREAMING_SNAKE_CASE : List[str] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Any ,lowercase__ : str=None ,lowercase__ : Any=None ,**lowercase__ : List[Any] ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,lowercase__ ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ ,lowercase__ )
def __call__( self : str ,lowercase__ : Tuple=None ,lowercase__ : List[Any]=None ,lowercase__ : Dict=None ,lowercase__ : List[str]="max_length" ,lowercase__ : List[Any]="np" ,**lowercase__ : Dict ):
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(lowercase__ ,lowercase__ ) or (isinstance(lowercase__ ,lowercase__ ) and not isinstance(text[0] ,lowercase__ )):
__lowercase = [self.tokenizer(lowercase__ ,padding=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )]
elif isinstance(lowercase__ ,lowercase__ ) and isinstance(text[0] ,lowercase__ ):
__lowercase = []
# Maximum number of queries across batch
__lowercase = max([len(lowercase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowercase__ ) != max_num_queries:
__lowercase = t + [''' '''] * (max_num_queries - len(lowercase__ ))
__lowercase = self.tokenizer(lowercase__ ,padding=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
encodings.append(lowercase__ )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
__lowercase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
__lowercase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowercase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
__lowercase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__lowercase = torch.cat([encoding['''input_ids'''] for encoding in encodings] ,dim=0 )
__lowercase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] ,dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowercase = tf.stack([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
__lowercase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
__lowercase = BatchEncoding()
__lowercase = input_ids
__lowercase = attention_mask
if query_images is not None:
__lowercase = BatchEncoding()
__lowercase = self.image_processor(
lowercase__ ,return_tensors=lowercase__ ,**lowercase__ ).pixel_values
__lowercase = query_pixel_values
if images is not None:
__lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,*lowercase__ : Optional[int] ,**lowercase__ : Tuple ):
return self.image_processor.post_process(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,*lowercase__ : List[Any] ,**lowercase__ : int ):
return self.image_processor.post_process_object_detection(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,*lowercase__ : List[str] ,**lowercase__ : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,*lowercase__ : str ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,*lowercase__ : List[Any] ,**lowercase__ : Dict ):
return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase__ ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase__ ,)
return self.image_processor
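# Added sketch: a standalone illustration (not calling the processor above)
# of the query padding in `__call__`: every sample is padded with " "
# entries so the whole batch shares the same number of text queries.
_batch = [["cat"], ["dog", "bird"]]
_max_q = max(len(t) for t in _batch)
_padded = [t + [" "] * (_max_q - len(t)) for t in _batch]
assert _padded == [["cat", " "], ["dog", "bird"]]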
'''simple docstring'''
import os
from math import logaa
def _A ( A__ = "base_exp.txt" ):
"""simple docstring"""
__lowercase = 0
__lowercase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowercase , __lowercase = list(map(A__ , line.split(''',''' ) ) )
if x * logaa(A__ ) > largest:
__lowercase = x * logaa(A__ )
__lowercase = i + 1
return result
if __name__ == "__main__":
print(solution())
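# Added sketch: why comparing exp * log10(base) is equivalent to comparing
# base ** exp directly -- log10 is strictly increasing, so the ordering of
# the products matches the ordering of the powers.
from math import log10

assert 2**11 > 3**6                  # 2048 > 729
assert 11 * log10(2) > 6 * log10(3)  # ~3.31 > ~2.86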
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase__ )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowercase__ )
__lowercase = model.generate(lowercase__ ,max_new_tokens=1_0 ,do_sample=lowercase__ )
__lowercase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__lowercase = TextStreamer(lowercase__ )
model.generate(lowercase__ ,max_new_tokens=1_0 ,do_sample=lowercase__ ,streamer=lowercase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__lowercase = cs.out[:-1]
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase__ )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowercase__ )
__lowercase = model.generate(lowercase__ ,max_new_tokens=1_0 ,do_sample=lowercase__ )
__lowercase = tokenizer.decode(greedy_ids[0] )
__lowercase = TextIteratorStreamer(lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
__lowercase = Thread(target=model.generate ,kwargs=lowercase__ )
thread.start()
__lowercase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase__ )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowercase__ )
__lowercase = model.generate(lowercase__ ,max_new_tokens=1_0 ,do_sample=lowercase__ )
__lowercase = greedy_ids[:, input_ids.shape[1] :]
__lowercase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__lowercase = TextStreamer(lowercase__ ,skip_prompt=lowercase__ )
model.generate(lowercase__ ,max_new_tokens=1_0 ,do_sample=lowercase__ ,streamer=lowercase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__lowercase = cs.out[:-1]
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__lowercase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowercase__ )
__lowercase = -1
__lowercase = torch.ones((1, 5) ,device=lowercase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__lowercase = TextStreamer(lowercase__ ,skip_special_tokens=lowercase__ )
model.generate(lowercase__ ,max_new_tokens=1 ,do_sample=lowercase__ ,streamer=lowercase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__lowercase = cs.out[:-1] # Remove the final "\n"
__lowercase = tokenizer(lowercase__ ,return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase__ )
__lowercase = -1
__lowercase = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowercase__ )
__lowercase = TextIteratorStreamer(lowercase__ ,timeout=0.0_0_1 )
__lowercase = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
__lowercase = Thread(target=model.generate ,kwargs=lowercase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowercase__ ):
__lowercase = ''''''
for new_text in streamer:
streamer_text += new_text
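# Added sketch: the producer/consumer pattern that TextIteratorStreamer is
# built on, as a standalone example -- generation runs in a background
# thread and pushes tokens onto a queue; iteration blocks until a sentinel.
from queue import Queue
from threading import Thread

_q = Queue()
Thread(target=lambda: [_q.put(t) for t in ("a", "b", None)]).start()
_text = ""
while (_tok := _q.get()) is not None:
    _text += _tok
assert _text == "ab"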
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
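# Added sketch: the past_key_values shape arithmetic used by the dummy-input
# generators above, with assumed example values (not taken from the config):
# each key/value tensor is (batch, num_heads, past_length, d_model // num_heads).
_batch, _heads, _seqlen, _d_model = 2, 16, 7, 512
_past_shape = (_batch, _heads, _seqlen + 2, _d_model // _heads)
assert _past_shape == (2, 16, 9, 32)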
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not isinstance(A__ , A__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(A__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(A__ ) == 1:
return True
__lowercase = series[1] - series[0]
for index in range(len(A__ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _A ( A__ ):
"""simple docstring"""
if not isinstance(A__ , A__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(A__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
__lowercase = 0
for val in series:
answer += val
return answer / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
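# Added sketch: the two helpers above share one obfuscated name (the second
# definition shadows the first), so this standalone snippet restates both
# checks inline.
_series = [2, 4, 6, 8]
_diffs = {b - a for a, b in zip(_series, _series[1:])}
assert len(_diffs) == 1                    # constant common difference -> arithmetic
assert sum(_series) / len(_series) == 5.0  # its arithmetic mean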
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b )
__lowercase = a // b
return (y, x - k * y)
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
def _A ( A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
if b < 0:
__lowercase = (b % n + n) % n
return b
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
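# Added worked example: a standalone brute-force check of the theorem.
# For x = 2 (mod 3) and x = 3 (mod 5), CRT guarantees exactly one solution
# modulo 3 * 5 = 15, namely x = 8.
_solutions = [x for x in range(15) if x % 3 == 2 and x % 5 == 3]
assert _solutions == [8]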
'''simple docstring'''
import numpy as np
import qiskit
def _A ( A__ = 8 , A__ = None ):
"""simple docstring"""
__lowercase = np.random.default_rng(seed=A__ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
__lowercase = 6 * key_len
# Measurement basis for Alice's qubits.
__lowercase = rng.integers(2 , size=A__ )
# The set of states Alice will prepare.
__lowercase = rng.integers(2 , size=A__ )
# Measurement basis for Bob's qubits.
__lowercase = rng.integers(2 , size=A__ )
# Quantum Circuit to simulate BB84
__lowercase = qiskit.QuantumCircuit(A__ , name='''BB84''' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(A__ ):
if alice_state[index] == 1:
bbaa_circ.x(A__ )
if alice_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(A__ ):
if bob_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__lowercase = qiskit.Aer.get_backend('''aer_simulator''' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
__lowercase = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ )
# Returns the result of measurement.
__lowercase = job.result().get_counts(A__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__lowercase = ''''''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
A__ , A__ , A__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__lowercase = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '''0''' )
return key
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
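# Added sketch: the classical sifting step in isolation (no qiskit needed).
# Only positions where Alice's and Bob's bases agree contribute key bits.
_alice_basis = [0, 1, 1, 0, 1]
_bob_basis = [0, 0, 1, 0, 0]
_measured = [1, 0, 1, 0, 1]
_sifted = "".join(str(m) for a, b, m in zip(_alice_basis, _bob_basis, _measured) if a == b)
assert _sifted == "110"  # bases agree at positions 0, 2 and 3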
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
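# Added sketch: the core mechanism patch_submodule relies on, standalone --
# temporarily swap a module-level attribute and restore it afterwards.
import types

_mod = types.ModuleType("demo")
_mod.join = "original"
_saved, _mod.join = _mod.join, "mock"
assert _mod.join == "mock"
_mod.join = _saved
assert _mod.join == "original"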
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase__ = '''=======
>>>>>>>
'''
lowerCAmelCase__ = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase__ = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _A ( A__ ):
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ):
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=lowercase__ ,required=lowercase__ ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=lowercase__ ,required=lowercase__ ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=lowercase__ )
def __init__( self : Optional[int] ,lowercase__ : str ,lowercase__ : str ,*lowercase__ : Optional[int] ):
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(lowercase__ )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"Looking at file {f_name}" )
__lowercase = os.path.join(lowercase__ ,lowercase__ )
__lowercase = os.path.join(lowercase__ ,lowercase__ )
if not os.path.isfile(lowercase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(lowercase__ ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda lowercase__ : e in out_line ,lowercase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase__ ) + '''\n''' )
out_lines.append(lowercase__ )
out_lines.append(lowercase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(lowercase__ ,lowercase__ ,lowercase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,lowercase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(lowercase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(lowercase__ ,lowercase__ )
__lowercase = os.path.join(lowercase__ ,lowercase__ )
os.makedirs(lowercase__ ,exist_ok=lowercase__ )
self._logger.info(F"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowercase__ )
if needs_manual_update:
with_manual_update.append(lowercase__ )
with open(lowercase__ ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(lowercase__ )
self._logger.info(F"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(lowercase__ )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(F"Moving {dest_folder} to {utils_file}" )
shutil.copy(lowercase__ ,lowercase__ )
except KeyError:
self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """simple docstring"""

    def __init__( self ,parent ,batch_size=1_3 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_2_8 ,max_relative_position=3_2 ,type_vocab_size=1_6 ,type_sequence_label_size=2 ,initializer_range=0.0_2 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return NezhaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)

    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )

    def create_and_check_model_as_decoder( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,encoder_hidden_states=encoder_hidden_states ,)
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )

    def create_and_check_for_masked_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_next_sequence_prediction( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )

    def create_and_check_for_pretraining( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,next_sentence_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )

    def create_and_check_for_question_answering( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict

    def setUp( self ):
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=NezhaConfig ,hidden_size=3_7 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict ,model_class )
            traced_model = torch.jit.trace(
                model ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model ,os.path.join(tmp ,'''bert.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp ,'''bert.pt''' ) ,map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) ,inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_nezha_model( self ):
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )
@slow
    def test_inference_nezha_masked_lm( self ):
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 2_1_1_2_8) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )
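

# ---------------------------------------------------------------------------
# A minimal, hedged sketch of the input-fabrication pattern the tester above
# relies on: ``ids_tensor`` and ``random_attention_mask`` boil down to the
# torch calls below (helper name and sizes are illustrative, not part of the
# test suite; assumes torch is available).
def make_dummy_batch(batch_size=1_3 ,seq_length=7 ,vocab_size=9_9 ):
    # Random token ids in [0, vocab_size) plus a 0/1 mask whose first position
    # is always attended, mirroring what random_attention_mask guarantees so
    # that no row ends up fully masked.
    input_ids = torch.randint(0 ,vocab_size ,(batch_size, seq_length) )
    attention_mask = torch.randint(0 ,2 ,(batch_size, seq_length) )
    attention_mask[:, 0] = 1
    return input_ids, attention_mask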
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config ,input_ids ,attention_mask=None ,head_mask=None ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """simple docstring"""

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__( self ,parent ,batch_size=1_3 ,seq_length=7 ,is_training=True ,use_labels=False ,vocab_size=9_9 ,hidden_size=1_6 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=4 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=2_0 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,embed_dim=1_6 ,word_embed_proj_dim=1_6 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        input_ids = tf.concat([input_ids, eos_tensor] ,axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=False ,**self.config_updates ,)
        inputs_dict = prepare_opt_inputs_dict(config ,input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self ,config ,inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids ,attention_mask=attention_mask ,use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] ,axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
        output_from_no_past = model(next_input_ids ,attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens ,attention_mask=next_attention_mask ,past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice ,output_from_no_past_slice ,rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 1_0

    def setUp( self ):
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=OPTConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )

    def test_resize_token_embeddings( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model ,embedding_layer ):
            if hasattr(embedding_layer ,'''weight''' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer ,'''weight''' ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model ,model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model ,model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model ,model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model ,model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] ,assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() ,new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] ,assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() ,new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor( tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class TFOPTHeadTests(unittest.TestCase ):
    """simple docstring"""

    vocab_size = 9_9

    def _get_config_and_data( self ):
        eos_column_vector = tf.ones((4, 1) ,dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size ,hidden_size=2_4 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=3_2 ,max_position_embeddings=4_8 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
        model = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
        input_ids = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = tf.not_equal(input_ids ,model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids ,attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 1_1, 5_1_2)
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = tf.constant(
            [[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
        self.assertTrue(np.allclose(output[:, :3, :3] ,expected_slice ,atol=4e-3 ) )
        xla_generate = tf.function(model ,jit_compile=True )
        output = xla_generate(input_ids ,attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] ,expected_slice ,atol=4e-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        super().setUp()
        self.path_model = '''facebook/opt-350m'''

    def test_logits( self ):
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model )
        prompts = [
            '''Today is a beautiful day and I want to''',
            '''In the city of''',
            '''Paris is the capital of France and''',
            '''Computers and mobile phones have taken''',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts ,return_tensors='''tf''' ,padding=True ,add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
                [-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
                [0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
                [6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
            ] )
        self.assertTrue(np.allclose(logits ,logits_meta ,atol=1e-4 ) )
        xla_generate = tf.function(model ,jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
        self.assertTrue(np.allclose(logits ,logits_meta ,atol=1e-4 ) )
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase ):
"""simple docstring"""
@property
    def prompts( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm( self ):
        model_id = '''facebook/opt-125m'''
        EXPECTED_OUTPUTS = [
            '''Today is a beautiful day and I want to''',
            '''In the city of New York, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt ,return_tensors='''tf''' ).input_ids
            generated_ids = model.generate(input_ids ,max_length=1_0 )
            generated_string = tokenizer.batch_decode(generated_ids ,skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs ,EXPECTED_OUTPUTS )
    def test_batch_generation( self ):
        model_id = '''facebook/opt-350m'''
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = '''left'''
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences ,return_tensors='''tf''' ,padding=True )
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids ,attention_mask=inputs['''attention_mask'''] )
        inputs_non_padded = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1] ,tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded ,max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs ,skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] ,skip_special_tokens=True )
        expected_output_sentence = [
            '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
            '''Today, I was in the middle of a conversation with a friend about the''',
        ]
        self.assertListEqual(expected_output_sentence ,batch_out_sentence )
        self.assertListEqual(expected_output_sentence ,[non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        model_id = '''facebook/opt-350m'''
        EXPECTED_OUTPUTS = [
            '''Today is a beautiful day and I want to''',
            '''In the city of San Francisco, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt ,return_tensors='''tf''' ).input_ids
            generated_ids = model.generate(input_ids ,max_length=1_0 )
            generated_string = tokenizer.batch_decode(generated_ids ,skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs ,EXPECTED_OUTPUTS )
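

# ---------------------------------------------------------------------------
# A hedged usage sketch of the models exercised above. The checkpoint name is
# taken from the tests; prompt and max_length=10 mirror the greedy-generation
# settings used there (function name is illustrative).
def demo_opt_generation():
    from transformers import GPT2Tokenizer, TFOPTForCausalLM

    tokenizer = GPT2Tokenizer.from_pretrained('''facebook/opt-125m''' )
    model = TFOPTForCausalLM.from_pretrained('''facebook/opt-125m''' )
    inputs = tokenizer('''Today is a beautiful day and''' ,return_tensors='''tf''' )
    output_ids = model.generate(inputs.input_ids ,max_length=1_0 )
    return tokenizer.batch_decode(output_ids ,skip_special_tokens=True )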
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    """simple docstring"""

    key: KEY
    val: VAL
class _DeletedItem(_Item ):
    """simple docstring"""

    def __init__( self ):
        super().__init__(None ,None )

    def __bool__( self ):
        return False
_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL] ):
    """simple docstring"""

    def __init__( self ,initial_block_size: int = 8 ,capacity_factor: float = 0.7_5 ):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self ,key: KEY ):
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self ,ind: int ):
        return (ind + 1) % len(self._buckets )

    def _try_set( self ,ind: int ,key: KEY ,val: VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key ,val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key ,val )
            return True
        else:
            return False

    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self ,new_size: int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key ,item.val )

    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self ,key: KEY ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self ,key: KEY ,val: VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind ,key ,val ):
                break

    def __setitem__( self ,key: KEY ,val: VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key ,val )

    def __delitem__( self ,key: KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self ,key: KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ):
        return self._len

    def __iter__( self ):
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ):
        val_string = ''', '''.join(
            F"{item.key}: {item.val}" for item in self._buckets if item )
        return F"HashMap({val_string})"
| 41 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline(ChunkPipeline ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self ,image : Union[str, "Image.Image", List[Dict[str, Any]]] ,candidate_labels : Union[str, List[str]] = None ,**kwargs ,):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''' )
        if isinstance(image ,(str, Image.Image) ):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs ,**kwargs )
        return results

    def _sanitize_parameters( self ,**kwargs ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params

    def preprocess( self ,inputs ):
        image = load_image(inputs['''image'''] )
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels ,str ):
            candidate_labels = candidate_labels.split(''',''' )
        target_size = torch.tensor([[image.height, image.width]] ,dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label ,return_tensors=self.framework )
            image_features = self.image_processor(image ,return_tensors=self.framework )
yield {
"is_last": i == len(lowercase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
    def _forward( self ,model_inputs ):
        target_size = model_inputs.pop('''target_size''' )
        candidate_label = model_inputs.pop('''candidate_label''' )
        is_last = model_inputs.pop('''is_last''' )
        outputs = self.model(**model_inputs )
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs

    def postprocess( self ,model_outputs ,threshold=0.1 ,top_k=None ):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output ,threshold=threshold ,target_sizes=model_output['''target_size'''] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0] )
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result )
        results = sorted(results ,key=lambda x : x["score"] ,reverse=True )
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box( self ,box: "torch.Tensor" ):
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
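

# ---------------------------------------------------------------------------
# A hedged usage sketch of this pipeline. The OWL-ViT checkpoint is the usual
# public example for zero-shot detection; the image URL and labels are
# illustrative, and the exact scores/boxes depend on the checkpoint.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline('''zero-shot-object-detection''' ,model='''google/owlvit-base-patch32''' )
    predictions = detector(
        '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,candidate_labels=['''cat''', '''remote control'''] ,)
    # Each prediction is {"score": float, "label": str, "box": {"xmin": ...}},
    # matching the dicts assembled in postprocess() above.
    print(predictions )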
| 41 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
"""simple docstring"""
@property
    def dummy_input( self ):
return self.get_dummy_input()
@property
    def output_shape( self ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
    def get_dummy_input( self ,include_temb=True ,include_res_hidden_states_tuple=False ,include_encoder_hidden_states=False ,include_skip_sample=False ,):
        batch_size = 4
        num_channels = 3_2
        sizes = (3_2, 3_2)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape ,generator=generator ,device=device )
        dummy_input = {'''hidden_states''': hidden_states}
        if include_temb:
            temb_channels = 1_2_8
            dummy_input['''temb'''] = randn_tensor((batch_size, temb_channels) ,generator=generator ,device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input['''res_hidden_states_tuple'''] = (randn_tensor(shape ,generator=generator_1 ,device=device ),)
        if include_encoder_hidden_states:
            dummy_input['''encoder_hidden_states'''] = floats_tensor((batch_size, 3_2, 3_2) ).to(torch_device )
        if include_skip_sample:
            dummy_input['''skip_sample'''] = randn_tensor(((batch_size, 3) + sizes) ,generator=generator ,device=device )
        return dummy_input

    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            '''in_channels''': 3_2,
            '''out_channels''': 3_2,
            '''temb_channels''': 1_2_8,
        }
        if self.block_type == "up":
            init_dict['''prev_output_channel'''] = 3_2
        if self.block_type == "mid":
            init_dict.pop('''out_channels''' )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output( self ,expected_slice ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output ,Tuple ):
            output = output[0]
        self.assertEqual(output.shape ,self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() ,expected_slice ,atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' ,'''Training is not supported in mps''' )
    def test_training( self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output ,Tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape ,device=device )
        loss = torch.nn.functional.mse_loss(output ,noise )
        loss.backward()
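

# ---------------------------------------------------------------------------
# Hedged illustration of how a concrete block test wires up the mixin above.
# The import path matches older diffusers layouts, and the expected_slice
# values are placeholders -- real tests hard-code numbers from a reference
# run. Guarded by __main__ so it is not collected as part of the suite.
if __name__ == "__main__":
    from diffusers.models.unet_2d_blocks import DownBlock2D

    class DownBlock2DTests(UNetBlockTesterMixin ,unittest.TestCase ):
        block_class = DownBlock2D
        block_type = "down"

        def test_output(self ):
            expected_slice = [0.0] * 9  # placeholder values, for illustration only
            super().test_output(expected_slice )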
| 41 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool ):
    """simple docstring"""

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']

    def setup( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail''' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )

    def encode( self ,text ,labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)

    def decode( self ,outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
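

# ---------------------------------------------------------------------------
# A hedged usage sketch of the tool above (example text and labels are
# illustrative; the first call lazily loads the BART-MNLI checkpoint):
if __name__ == "__main__":
    classifier = TextClassificationTool()
    result = classifier('''This new album is fantastic!''' ,labels=['''positive''', '''negative'''] )
    print(result )  # expected to print the most likely label, e.g. "positive"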
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tapas'''] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_tapas'''] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
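
# A hedged sketch of what the _LazyModule wiring above buys: heavy framework
# imports are deferred until the first attribute access (consumer code shown
# for illustration only):
#
#     import transformers
#
#     config = transformers.TapasConfig()      # first access triggers the real import
#     tokenizer_cls = transformers.TapasTokenizer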
| 41 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """simple docstring"""

    def __init__( self ,key : Callable | None = None ):
        # Stores actual heap items.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)

    def _parent( self ,i ):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2 ) if i > 0 else None

    def _left( self ,i ):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None

    def _right( self ,i ):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None

    def _swap( self ,i ,j ):
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]

    def _cmp( self ,i ,j ):
        """Compares the two items using the default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent( self ,i ):
        """Returns index of valid parent as per desired ordering among given index and both its children."""
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left ,valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right ,valid_parent ):
            valid_parent = right
        return valid_parent

    def _heapify_up( self ,index ):
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index )
        while parent is not None and not self._cmp(index ,parent ):
            self._swap(index ,parent )
            index , parent = parent, self._parent(parent )

    def _heapify_down( self ,index ):
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index ,valid_parent )
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )

    def update_item( self ,item ,item_value ):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )

    def delete_item( self ,item ):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )

    def insert_item( self ,item ,item_value ):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )

    def get_top( self ):
        """Returns the top item pair [item, calculated value] if present."""
        return self.arr[0] if self.size else None

    def extract_top( self ):
        """Returns and removes the top item pair [item, calculated value] if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
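

# ---------------------------------------------------------------------------
# A short usage sketch of the heap above (illustrative values; with the
# default key the structure behaves as a max-heap over the item values --
# pass e.g. key=lambda x: -x for min-heap behaviour):
def demo_heap():
    h = Heap()
    h.insert_item(5 ,34 )
    h.insert_item(6 ,31 )
    h.insert_item(7 ,37 )
    print(h.get_top() )      # [7, 37]
    print(h.extract_top() )  # [7, 37]
    print(h.get_top() )      # [5, 34]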
| 41 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli'
SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
SCREAMING_SNAKE_CASE : Any = 'text_classifier'
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']]
SCREAMING_SNAKE_CASE : List[str] = ['text']
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setup()
__lowercase = self.model.config
__lowercase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__lowercase = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ):
__lowercase = labels
return self.pre_processor(
[text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = outputs.logits
__lowercase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback ):
    """simple docstring"""

    def __init__( self ):
        self.events = []

    def on_init_end( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_init_end''' )

    def on_train_begin( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_train_begin''' )

    def on_train_end( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_train_end''' )

    def on_epoch_begin( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_epoch_begin''' )

    def on_epoch_end( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_epoch_end''' )

    def on_step_begin( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_step_begin''' )

    def on_step_end( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_step_end''' )

    def on_evaluate( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_evaluate''' )

    def on_predict( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_predict''' )

    def on_save( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_save''' )

    def on_log( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_log''' )

    def on_prediction_step( self ,args ,state ,control ,**kwargs ):
        self.events.append('''on_prediction_step''' )
@require_torch
class TrainerCallbackTest(unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='''ignore''' ,category=UserWarning )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 41 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMPipeline
SCREAMING_SNAKE_CASE : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE : int = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
SCREAMING_SNAKE_CASE : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=3 ,out_channels=3 ,down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') ,up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') ,)
__lowercase = DDIMScheduler()
__lowercase = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ,lowercase__ : int=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 3_2, 3_2, 3) )
__lowercase = np.array(
[1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4] )
__lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase__ ,1e-3 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : int ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''google/ddpm-cifar10-32'''
__lowercase = UNetaDModel.from_pretrained(lowercase__ )
__lowercase = DDIMScheduler()
__lowercase = DDIMPipeline(unet=lowercase__ ,scheduler=lowercase__ )
ddim.to(lowercase__ )
ddim.set_progress_bar_config(disable=lowercase__ )
__lowercase = torch.manual_seed(0 )
__lowercase = ddim(generator=lowercase__ ,eta=0.0 ,output_type='''numpy''' ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = '''google/ddpm-ema-bedroom-256'''
__lowercase = UNetaDModel.from_pretrained(lowercase__ )
__lowercase = DDIMScheduler.from_pretrained(lowercase__ )
__lowercase = DDIMPipeline(unet=lowercase__ ,scheduler=lowercase__ )
ddpm.to(lowercase__ )
ddpm.set_progress_bar_config(disable=lowercase__ )
__lowercase = torch.manual_seed(0 )
__lowercase = ddpm(generator=lowercase__ ,output_type='''numpy''' ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__lowercase = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. contronet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
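# Hedged output sketch (not part of the original file; illustrative only): the
# forward pass above yields one residual tensor per down-block stage plus a
# single mid-block residual, each already multiplied by `conditioning_scale`
# in the "6. scaling" step, ready to be added into the UNet's skip connections.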
| 41 | 1 |
'''simple docstring'''
from itertools import permutations
def _A ( A__ ):
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__lowercase = [7, 11, 13, 17]
for i, test in enumerate(A__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def _A ( A__ = 10 ):
"""simple docstring"""
return sum(
int(''''''.join(map(A__ , A__ ) ) )
for num in permutations(range(A__ ) )
if is_substring_divisible(A__ ) )
if __name__ == "__main__":
print(f'{solution() = }')
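    # Hedged sanity check (worked by hand from Project Euler 43's known
    # example): the pandigital 1406357289 passes every substring-divisibility
    # test above — e.g. its digits 5-7 form 357, which is divisible by 7 — so
    # is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) should return True.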
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
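# Hedged shape sketch (illustrative numbers, not from the original file): for a
# 3x32x32 image tensor and 16x16 patches, torch_extract_patches returns a
# tensor of shape (1, 2, 2, 768), i.e. (batch, rows, columns,
# patch_height * patch_width * channels), since 16 * 16 * 3 = 768.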
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
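# Hedged usage sketch (parameters are the defaults declared above): calling
# render_text("What does the chart show?") returns a white PIL image with the
# question drawn in black 36pt text, wrapped at 80 characters and padded by
# 5 pixels on every side.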
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
        # maximize scale s.t. the resized image yields at most max_patches patches
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
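    # Hedged shape note (follows from the padding step above): every image is
    # flattened to exactly (max_patches, 2 + patch_height * patch_width *
    # channels) values, where the first two columns carry the 1-based row and
    # column id of each patch.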
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are always flattened patches.''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
            __lowercase = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
        __lowercase = [to_numpy_array(image ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
            __lowercase = [self.normalize(image=image ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
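# Hedged note on the attention mask (follows from the lines above): a patch row
# is treated as padding exactly when all of its values are zero, so the mask is
# 1.0 for real patches and 0.0 for the rows appended to reach max_patches.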
| 41 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowerCAmelCase__ = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def _A ( A__ , A__ ):
"""simple docstring"""
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
__lowercase = json.loads(f.read() )
__lowercase = collections.OrderedDict()
__lowercase = collections.OrderedDict()
__lowercase = collections.OrderedDict()
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(A__ ):
__lowercase = b
__lowercase = idx
for wd in b:
__lowercase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
def __init__( self : Tuple ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,lowercase__ : Any="<|endoftext|>" ,lowercase__ : Optional[Any]="<|endoftext|>" ,lowercase__ : int="<|startoftext|>" ,lowercase__ : Dict="<|endoftext|>" ,lowercase__ : Tuple=False ,**lowercase__ : Dict ,):
super().__init__(
unk_token=lowercase__ ,pad_token=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,do_clean_text=lowercase__ ,**lowercase__ ,)
if not os.path.isfile(lowercase__ ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(lowercase__ ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
__lowercase = do_clean_text
__lowercase , __lowercase , __lowercase , __lowercase = load_vocab_and_emoji(lowercase__ ,lowercase__ )
__lowercase = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def SCREAMING_SNAKE_CASE ( self : Any ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
return self.subword_tokenizer.tokenize(lowercase__ ,clean=self.do_clean_text )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
return self.vocab.get(lowercase__ ,self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any] ):
return self.subword_tokenizer.convert_id_to_token(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
__lowercase = ''''''.join(lowercase__ ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : "Conversation" ):
__lowercase = []
for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
__lowercase = 0
if os.path.isdir(lowercase__ ):
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
__lowercase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
__lowercase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(lowercase__ ,'''w''' ,encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
''' Please check that the vocabulary is not corrupted!''' )
__lowercase = token_index
writer.write(''','''.join(lowercase__ ) + '''\n''' )
index += 1
with open(lowercase__ ,'''w''' ,encoding='''utf-8''' ) as writer:
json.dump(self.emoji ,lowercase__ )
return vocab_file, emoji_file
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : str ):
__lowercase = vocab # same as swe
__lowercase = ids_to_tokens # same as bpe
__lowercase = emoji
__lowercase = np.max([len(lowercase__ ) for w in self.vocab.keys()] )
__lowercase = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
__lowercase = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
__lowercase = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
__lowercase = re.compile(
r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
__lowercase = re.compile(
r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
__lowercase = re.compile(
r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
__lowercase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
__lowercase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
__lowercase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self : Dict ):
return len(self.ids_to_tokens )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ):
__lowercase = self.content_repattera.sub('''<URL>''' ,lowercase__ )
__lowercase = self.content_repattera.sub('''<EMAIL>''' ,lowercase__ )
__lowercase = self.content_repattera.sub('''<TEL>''' ,lowercase__ )
__lowercase = self.content_repattera.sub('''<DATE>''' ,lowercase__ )
__lowercase = self.content_repattera.sub('''<DATE>''' ,lowercase__ )
__lowercase = self.content_repattera.sub('''<PRICE>''' ,lowercase__ )
__lowercase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__lowercase = content.replace('''<BLOCK><BLOCK>''' ,'''<BLOCK>''' )
return content
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[Any] ,lowercase__ : Union[str, Any]=False ):
__lowercase = text.replace(''' ''' ,'''<SP>''' )
__lowercase = text.replace(''' ''' ,'''<SP>''' )
__lowercase = text.replace('''\r\n''' ,'''<BR>''' )
__lowercase = text.replace('''\n''' ,'''<BR>''' )
__lowercase = text.replace('''\r''' ,'''<BR>''' )
__lowercase = text.replace('''\t''' ,'''<TAB>''' )
__lowercase = text.replace('''—''' ,'''ー''' )
__lowercase = text.replace('''−''' ,'''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
                __lowercase = text.replace(k ,v )
if clean:
__lowercase = self.clean_text(lowercase__ )
def check_simbol(lowercase__ : str ):
__lowercase = x.encode()
if len(lowercase__ ) == 1 and len(lowercase__ ) == 2:
__lowercase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2a1 and c <= 0xc_2bf)
or (c >= 0xc_780 and c <= 0xc_783)
or (c >= 0xc_ab9 and c <= 0xc_bbf)
or (c >= 0xc_c80 and c <= 0xc_da2)
):
return True
return False
def checkuae(lowercase__ : Dict ):
__lowercase = x.encode()
if len(lowercase__ ) == 1 and len(lowercase__ ) == 3:
__lowercase = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28_080 and c <= 0xe2b_07f:
return True
return False
__lowercase = 0
__lowercase = []
while pos < len(lowercase__ ):
__lowercase = min(len(lowercase__ ) ,pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
__lowercase = [] # (token_id, token, pos)
for e in range(lowercase__ ,lowercase__ ,-1 ):
__lowercase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowercase__ ) > 2:
__lowercase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowercase__ ) > 0:
# the smallest token_id is adopted
                __lowercase , __lowercase , __lowercase = sorted(lowercase__ ,key=lambda x : x[0] )[0]
result.append(lowercase__ )
__lowercase = e
else:
__lowercase = pos + 1
__lowercase = text[pos:end]
if check_simbol(lowercase__ ):
result.append('''<KIGOU>''' )
elif checkuae(lowercase__ ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
__lowercase = end
return result
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ,lowercase__ : List[Any]="\n" ):
__lowercase = []
__lowercase = []
__lowercase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowercase__ ) > 0:
words.append(bytearray(lowercase__ ).decode('''utf-8''' ,errors='''replace''' ) )
__lowercase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(lowercase__ )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(lowercase__ )
if len(lowercase__ ) > 0:
words.append(bytearray(lowercase__ ).decode('''utf-8''' ,errors='''replace''' ) )
__lowercase = ''''''.join(lowercase__ )
return text
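# Hedged behaviour sketch (assumption: matches the regex substitutions defined
# in __init__ above): with clean=True, tokenize() collapses URLs, e-mail
# addresses, phone numbers, both date formats and prices down to the
# placeholder tokens <URL>, <EMAIL>, <TEL>, <DATE> and <PRICE> before the
# subword lookup runs.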
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
        return [round(i ,2 ) for i in final_signal]
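# Hedged expected result (computed by hand from the signals set in __init__):
# circularly convolving [2, 1, 2, -1] with [1, 2, 3, 4] gives
# [10.0, 10.0, 6.0, 14.0], which is what the method above should return.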
if __name__ == "__main__":
doctest.testmod()
| 41 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
lowerCAmelCase__ = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = CamembertTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = CamembertTokenizerFast
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
def SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = CamembertTokenizer(lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = '''<pad>'''
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) ,lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(vocab_keys[-1] ,'''<mask>''' )
self.assertEqual(len(lowercase__ ) ,1_0_0_4 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_5 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = CamembertTokenizer(lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
__lowercase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowercase = '''I was born in 92000, and this is falsé.'''
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowercase = tokenizer.convert_ids_to_tokens(lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = '''I was born in 92000, and this is falsé.'''
__lowercase = tokenizer.tokenize(lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# fmt: off
__lowercase = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowercase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ ,model_name='''camembert-base''' ,revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' ,sequences=lowercase__ ,)
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _A ( A__ ):
"""simple docstring"""
__lowercase = tmp_path / '''file.csv'''
__lowercase = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(A__ , '''w''' ) as f:
f.write(A__ )
return str(A__ )
@pytest.fixture
def _A ( A__ ):
"""simple docstring"""
__lowercase = tmp_path / '''malformed_file.csv'''
__lowercase = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(A__ , '''w''' ) as f:
f.write(A__ )
return str(A__ )
@pytest.fixture
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = tmp_path / '''csv_with_image.csv'''
__lowercase = textwrap.dedent(
F"\\n image\n {image_file}\n " )
with open(A__ , '''w''' ) as f:
f.write(A__ )
return str(A__ )
@pytest.fixture
def _A ( A__ ):
"""simple docstring"""
__lowercase = tmp_path / '''csv_with_label.csv'''
__lowercase = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(A__ , '''w''' ) as f:
f.write(A__ )
return str(A__ )
@pytest.fixture
def _A ( A__ ):
"""simple docstring"""
__lowercase = tmp_path / '''csv_with_int_list.csv'''
__lowercase = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(A__ , '''w''' ) as f:
f.write(A__ )
return str(A__ )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = Csv()
__lowercase = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(A__ ) in record.message
for record in caplog.records )
@require_pil
def _A ( A__ ):
"""simple docstring"""
with open(A__ , encoding='''utf-8''' ) as f:
__lowercase = f.read().splitlines()[1]
__lowercase = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
__lowercase = csv._generate_tables([[csv_file_with_image]] )
__lowercase = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
__lowercase = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _A ( A__ ):
"""simple docstring"""
with open(A__ , encoding='''utf-8''' ) as f:
__lowercase = f.read().splitlines()[1:]
__lowercase = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
__lowercase = csv._generate_tables([[csv_file_with_label]] )
__lowercase = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
__lowercase = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _A ( A__ ):
"""simple docstring"""
    __lowercase = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
__lowercase = csv._generate_tables([[csv_file_with_int_list]] )
__lowercase = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
__lowercase = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
    if args.device == "cpu" and args.fp16:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
        A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
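    # Hedged usage for summarization (assumption: mirrors the MT example above;
    # paths are placeholders, all flags are defined in the parser):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_preds.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json \
    #     --task summarization --bs 8 --fp16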
| 41 | 1 |
'''simple docstring'''
def _A ( A__ = 50 ):
"""simple docstring"""
__lowercase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
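    # Hedged sanity check (worked by hand from the recurrence above): with
    # tiles of length 2-4, rows of length 1-4 admit 1, 2, 4 and 8 tilings
    # respectively, and a row of length 5 admits 15, so solution(5) == 15.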
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A__ ):
print(F"{i}\t\t{d}" )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [float('''inf''' )] * vertex_count
__lowercase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__lowercase = distance[u] + w
__lowercase = check_negative_cycle(A__ , A__ , A__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
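    # Hedged example run (illustrative values, not part of the original
    # script): for edges 0->1 (weight 2) and 1->2 (weight 3) with source 0,
    # bellman_ford returns [0.0, 2.0, 5.0]; a negative cycle would instead
    # raise the "Negative cycle found" exception defined above.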
| 41 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
        state_dict.pop(k ,None )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """Deprecated alias kept for backward compatibility."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 41 | 1 |
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle (in degrees) with a truncated Maclaurin series."""
    # Reduce the angle to the equivalent value in [0, 360) degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
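# Quick sanity checks (these follow from the truncated Maclaurin series; with
# the default accuracy the truncation error is far below the rounding
# precision):
#     >>> sin(90.0)
#     1.0
#     >>> sin(30.0)
#     0.5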
if __name__ == "__main__":
    __import__('doctest').testmod()
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):  # descriptive name; the original name was collapsed to `_A`
    """A second lock on the same file must time out while the first is held."""
    locka = FileLock(str(tmpdir / 'foo.lock'))
    lockb = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_lock_filename(tmpdir):  # descriptive name; the original name was collapsed to `_A`
    """Over-long lock file names must be shortened below the filesystem limit."""
    filename = 'a' * 1000 + '.lock'
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith('.lock')
    assert not locka._lock_file.endswith(filename)
    # Most filesystems cap individual file names at 255 bytes.
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
| 41 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
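    # Note: `_LazyModule` defers the actual submodule imports until an
    # attribute of this package is first accessed, so importing the package
    # stays cheap even though the modeling code pulls in torch.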
| 41 | 1 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term that has ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
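# Sanity check (well-known Project Euler #25 result): the first Fibonacci
# term with 1000 digits is term 4782, so `solution()` should return 4782.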
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 41 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line):
    """Return the leading whitespace of ``line`` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a key function so underscores are ignored when sorting."""
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    """Sort ``objects`` with constants first, classes second, functions last."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
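# Illustrative ordering (the names below are made up for the example):
# constants come first, classes second, functions last, each group sorted
# alphabetically while ignoring leading underscores:
#     >>> sort_objects(['load_tool', 'CONFIG_MAPPING', 'BertModel'])
#     ['CONFIG_MAPPING', 'BertModel', 'load_tool']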
def sort_objects_in_import(import_statement):
    """Sort the objects listed inside a single import statement."""
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys)]) + "]"
    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([F"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the ``_import_structure`` of one ``__init__.py``; return True if changes are needed."""
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:'
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure` entries to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != '\n'.join(main_blocks):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}.")
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run ``sort_imports`` on every ``__init__.py`` under ``PATH_TO_DIFFUSERS``."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures += [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(F"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 1 |
'''simple docstring'''
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Return the fixed monthly installment for a fully amortized loan."""
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
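# Worked example (illustrative numbers): a principal of 25_000 at 12% per
# annum over 2 years gives rate_per_month = 0.01 and 24 payments, so
# equated_monthly_installments(25_000, 0.12, 2) comes out to roughly 1176.84.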
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no equivalent in the HF model."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path='facebook/mbart-large-en-ro', finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and return an HF MBartForConditionalGeneration."""
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _A ( A__ ):
"""simple docstring"""
if isinstance(A__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class lowercase_ :
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE ( self : str ):
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ,lowercase__ : Optional[int]=None ,**lowercase__ : str ):
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase__ ,lowercase__ )
__lowercase = TFVisionTextDualEncoderModel(lowercase__ )
__lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : Optional[int]=None ,**lowercase__ : Any ):
__lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ )
__lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ )
__lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=None ,**lowercase__ : Any ):
__lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase__ )
__lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Any=None ,**lowercase__ : Dict ):
__lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ )
__lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ )
__lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(lowercase__ )
__lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase__ ,1e-5 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any]=None ,**lowercase__ : List[Any] ):
__lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ )
__lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ )
__lowercase = model(
input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ,output_attentions=lowercase__ )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(lowercase__ ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(lowercase__ ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : np.ndarray ,lowercase__ : float ):
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(lowercase__ ,lowercase__ ,F"Difference between torch and flax is {diff} (>= {tol})." )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**lowercase__ )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase__ )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(lowercase__ )
__lowercase = model_a(**lowercase__ )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase__ ,1e-5 )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-random-bert''' )
__lowercase = 1_3
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ):
__lowercase = TFViTModel(lowercase__ ,name='''vision_model''' )
__lowercase = TFBertModel(lowercase__ ,name='''text_model''' )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Any ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' ,'''hf-internal-testing/tiny-random-roberta''' )
__lowercase = 1_3
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Optional[int] ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : str ,lowercase__ : Optional[Any]=None ,**lowercase__ : str ):
__lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ )
__lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ )
__lowercase = model(
input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ,output_attentions=lowercase__ )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(lowercase__ ) ,vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(lowercase__ ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ,lowercase__ : Tuple ):
__lowercase = TFDeiTModel(lowercase__ ,name='''vision_model''' )
__lowercase = TFRobertaModel(lowercase__ ,name='''text_model''' )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' ,'''hf-internal-testing/tiny-random-bert''' )
__lowercase = 1_3
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ):
__lowercase = TFCLIPVisionModel(lowercase__ ,name='''vision_model''' )
__lowercase = TFBertModel(lowercase__ ,name='''text_model''' )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 ,from_pt=lowercase__ )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowercase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=lowercase__ ,padding=lowercase__ ,return_tensors='''np''' )
__lowercase = model(**lowercase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
__lowercase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() ,lowercase__ ,atol=1e-3 ) )
| 41 |
'''simple docstring'''
import os
from math import log10
def solution(data_file: str = 'base_exp.txt') -> int:
    """Return the 1-based line number holding the largest base**exponent pair."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        # Compare x * log10(a) instead of a**x to avoid astronomically large numbers.
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
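# Sanity note: for the standard Project Euler #99 input file this returns 709
# (line numbering is 1-based via ``i + 1``).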
if __name__ == "__main__":
    print(solution())
| 41 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCAmelCase__ = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowerCAmelCase__ = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowerCAmelCase__ = '''
Calculates how good predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum number of seconds each candidate program may run before it
        counts as failed (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowerCAmelCase__ = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowerCAmelCase__ = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/openai/human-eval',
            codebase_urls=['https://github.com/openai/human-eval'],
            reference_urls=['https://github.com/openai/human-eval'],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {F"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem and return the estimates in an array."""
    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
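# Worked example (follows directly from the estimator above): with n=5
# completions of which c=2 pass, pass@1 = 1 - (1 - 1/4) * (1 - 1/5) = 0.4,
# i.e. exactly c / n for k=1:
#     >>> estimate_pass_at_k([5], [2], 1)
#     array([0.4])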
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
        # If the numbers of encoder and decoder layers are both present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If the batch axis is dynamic (-1), forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If the sequence axis is dynamic (-1), forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
| 41 | 1 |
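# The OnnxConfig above zero-fills dummy past_key_values so the ONNX tracer
# sees cache tensors of the right rank. A sketch of the shape arithmetic it
# relies on, with head_dim = d_model // num_heads; the numbers below are
# illustrative assumptions for the example, not values read from the config.
import torch

batch, past_length = 2, 8
num_heads, d_model, num_layers = 16, 512, 8
past_shape = (batch, num_heads, past_length, d_model // num_heads)
# one (key, value) pair of cached projections per decoder layer
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 16, 8, 32)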
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
| 41 |
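# The tests above exercise datasets.utils.patching.patch_submodule. What
# follows is a deliberately simplified stand-in for the idea only: swap a
# dotted attribute inside a module, then restore it on exit. The real helper
# additionally wraps intermediate submodules in _PatchedModuleObj, which this
# sketch does not attempt.
import contextlib
import os

@contextlib.contextmanager
def patch_attribute(module, target, mock):
    *parents, attr = target.split(".")
    obj = module
    for name in parents:
        obj = getattr(obj, name)  # walk e.g. os -> os.path
    original = getattr(obj, attr)
    setattr(obj, attr, mock)  # patch
    try:
        yield
    finally:
        setattr(obj, attr, original)  # always restore

with patch_attribute(os, "path.join", lambda *a: "__mock__"):
    assert os.path.join("a", "b") == "__mock__"
assert os.path.join("a", "b") != "__mock__"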
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b )
__lowercase = a // b
return (y, x - k * y)
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
def _A ( A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
if b < 0:
__lowercase = (b % n + n) % n
return b
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 1 |
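# De-obfuscated sketch of the number theory above; the names a, b, x, y,
# n1, r1, n2, r2 are reconstructions, not names from the snippet.
def extended_euclid(a, b):
    """Return (x, y) with a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)

def chinese_remainder_theorem(n1, r1, n2, r2):
    """Smallest non-negative x with x % n1 == r1 and x % n2 == r2, for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)  # n1*x + n2*y == 1
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2  # == r1 (mod n1) and == r2 (mod n2)
    return (n % m + m) % m

# x == 2 (mod 3) and x == 3 (mod 5)  ->  x = 8
assert chinese_remainder_theorem(3, 2, 5, 3) == 8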
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : str ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : bool = True ,**lowercase__ : Any ,):
super().__init__(**lowercase__ )
__lowercase = size if size is not None else {'''shortest_edge''': 2_2_4}
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ )
__lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Dict ,):
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase = get_resize_output_image_size(lowercase__ ,size=size['''shortest_edge'''] ,default_to_square=lowercase__ )
return resize(lowercase__ ,size=lowercase__ ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Optional[int] ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowercase__ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : Union[int, float] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : str ,):
return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ,):
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : ImageInput ,lowercase__ : bool = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = None ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : bool = None ,lowercase__ : float = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : Optional[ChannelDimension] = ChannelDimension.FIRST ,**lowercase__ : Dict ,):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(lowercase__ ,param_name='''size''' ,default_to_square=lowercase__ )
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' ,default_to_square=lowercase__ )
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=lowercase__ ,size=lowercase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
__lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
| 41 |
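# The image processor above applies, in order: convert-to-RGB, resize,
# center-crop, rescale, normalize, channels-first. A numpy sketch of the
# purely numeric tail of that pipeline (resize/crop omitted); the CLIP
# mean/std constants are the standard published values:
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

def preprocess(image):
    pixels = image.astype(np.float32) * (1 / 255)  # do_rescale
    pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # do_normalize
    return np.transpose(pixels, (2, 0, 1))  # HWC -> CHW (ChannelDimension.FIRST)

dummy = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
assert preprocess(dummy).shape == (3, 224, 224)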
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
| 41 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
| 41 | 1 |
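# A minimal smoke test mirroring the tester dimensions above (batch 13,
# sequence length 7, hidden size 32). Assumes a transformers build that
# ships the Nezha model; this is not part of the test file itself.
import torch
from transformers import NezhaConfig, NezhaModel

config = NezhaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
model = NezhaModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))
with torch.no_grad():
    outputs = model(input_ids)
assert outputs.last_hidden_state.shape == (13, 7, 32)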
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _A ( ):
"""simple docstring"""
__lowercase = HfArgumentParser(A__ )
__lowercase = parser.parse_args_into_dataclasses()[0]
__lowercase = TensorFlowBenchmark(args=A__ )
try:
__lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowercase = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__lowercase = ''' '''.join(str(A__ ).split(''' ''' )[:-1] )
__lowercase = ''''''
__lowercase = eval(str(A__ ).split(''' ''' )[-1] )
__lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(A__ )
if len(A__ ) > 0:
__lowercase = full_error_msg + begin_error_msg + str(A__ )
raise ValueError(A__ )
benchmark.run()
if __name__ == "__main__":
main()
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
super().__init__(lowercase__ ,lowercase__ )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 41 | 1 |
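# The map above resolves collisions by linear probing: _get_next_ind steps
# one bucket forward (wrapping around), and deletion writes the _deleted
# tombstone so later lookups keep probing past the gap instead of stopping
# early. A self-contained demonstration of the probe sequence alone:
def probe_sequence(key, table_size):
    ind = hash(key) % table_size
    for _ in range(table_size):  # visit each bucket at most once
        yield ind
        ind = (ind + 1) % table_size

buckets = [None] * 8
for value in ("a", "b", "c"):
    for ind in probe_sequence(value, len(buckets)):
        if buckets[ind] is None:  # take the first free slot
            buckets[ind] = value
            break
assert sorted(b for b in buckets if b is not None) == ["a", "b", "c"]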
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ):
super().__init__()
__lowercase = nn.Linear(3 ,4 )
__lowercase = nn.BatchNormad(4 )
__lowercase = nn.Linear(4 ,5 )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ):
return self.lineara(self.batchnorm(self.lineara(lowercase__ ) ) )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Dict ,*lowercase__ : List[str] ,**lowercase__ : List[str] ):
return (args[0] + 1,) + args[1:], kwargs
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ):
return output + 1
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = ModelForTest()
__lowercase = ModelHook()
add_hook_to_module(lowercase__ ,lowercase__ )
self.assertEqual(test_model._hf_hook ,lowercase__ )
self.assertTrue(hasattr(lowercase__ ,'''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,'''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['''x'''] )
remove_hook_from_module(lowercase__ )
self.assertFalse(hasattr(lowercase__ ,'''_hf_hook''' ) )
self.assertFalse(hasattr(lowercase__ ,'''_old_forward''' ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = ModelForTest()
__lowercase = ModelHook()
add_hook_to_module(lowercase__ ,lowercase__ )
add_hook_to_module(lowercase__ ,lowercase__ ,append=lowercase__ )
self.assertEqual(isinstance(test_model._hf_hook ,lowercase__ ) ,lowercase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) ,2 )
self.assertTrue(hasattr(lowercase__ ,'''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,'''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['''x'''] )
remove_hook_from_module(lowercase__ )
self.assertFalse(hasattr(lowercase__ ,'''_hf_hook''' ) )
self.assertFalse(hasattr(lowercase__ ,'''_old_forward''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ModelForTest()
__lowercase = torch.randn(2 ,3 )
__lowercase = test_model(x + 1 )
__lowercase = test_model(x + 2 )
__lowercase = PreForwardHook()
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
self.assertTrue(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase = PreForwardHook()
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
self.assertTrue(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase = SequentialHook(PreForwardHook() ,PreForwardHook() )
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
assert torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ModelForTest()
__lowercase = torch.randn(2 ,3 )
__lowercase = test_model(lowercase__ )
__lowercase = PostForwardHook()
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
self.assertTrue(torch.allclose(lowercase__ ,output + 1 ,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase = PostForwardHook()
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
self.assertTrue(torch.allclose(lowercase__ ,output + 1 ,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase = SequentialHook(PostForwardHook() ,PostForwardHook() )
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
assert torch.allclose(lowercase__ ,output + 2 ,atol=1e-5 )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = ModelForTest()
__lowercase = torch.randn(2 ,3 )
__lowercase = test_model(lowercase__ )
__lowercase = PostForwardHook()
add_hook_to_module(lowercase__ ,lowercase__ )
__lowercase = test_model(lowercase__ )
self.assertTrue(torch.allclose(lowercase__ ,output + 1 ) )
self.assertTrue(outputa.requires_grad )
__lowercase = True
__lowercase = test_model(lowercase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
        # This will move the submodules to different devices
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device(0 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,torch.device(1 ) )
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(lowercase__ ,AlignDevicesHook(io_same_device=lowercase__ ) )
__lowercase = torch.randn(2 ,3 ).to(0 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,torch.device(0 ) )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
        # This will move the submodules to different devices
__lowercase = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara ,AlignDevicesHook(**lowercase__ ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**lowercase__ ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**lowercase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device ,lowercase__ )
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,lowercase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
# Now test with buffers included in the offload
__lowercase = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara ,AlignDevicesHook(**lowercase__ ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**lowercase__ ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**lowercase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device('''meta''' ) )
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,lowercase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
        # This will offload each submodule's weights, keeping execution on the chosen device
__lowercase = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(lowercase__ ,execution_device=lowercase__ ,offload=lowercase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase = torch.device(lowercase__ )
self.assertEqual(model.batchnorm.running_mean.device ,lowercase__ )
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,lowercase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(lowercase__ )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowercase__ ,execution_device=lowercase__ ,offload=lowercase__ ,offload_buffers=lowercase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device('''meta''' ) )
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,lowercase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(lowercase__ )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
        # This will offload each submodule's weights, keeping execution on the chosen device
__lowercase = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
lowercase__ ,execution_device=lowercase__ ,offload=lowercase__ ,weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase = torch.device(lowercase__ )
self.assertEqual(model.batchnorm.running_mean.device ,lowercase__ )
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,lowercase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(lowercase__ )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowercase__ ,execution_device=lowercase__ ,offload=lowercase__ ,weights_map=model.state_dict() ,offload_buffers=lowercase__ ,)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device('''meta''' ) )
__lowercase = torch.randn(2 ,3 )
__lowercase = model(lowercase__ )
self.assertEqual(output.device ,lowercase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(lowercase__ )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('''cpu''' ) )
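if __name__ == "__main__":
    # A minimal, hedged sketch of the offload flow exercised by the tests above,
    # using the public names from `accelerate.hooks`; the toy model here is an
    # assumption of this sketch, not the `ModelForTest` double used in the tests.
    import torch
    from torch import nn
    from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

    toy = nn.Sequential(nn.Linear(3, 4), nn.BatchNorm1d(4), nn.Linear(4, 5))
    # Parameters are offloaded to the meta device and streamed to the
    # execution device on every forward pass.
    attach_align_device_hook(toy, execution_device="cpu", offload=True)
    _ = toy(torch.randn(2, 3))
    remove_hook_from_submodules(toy)  # loads the real weights back in place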
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(lowercase__ )
def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,):
if "text_queries" in kwargs:
__lowercase = kwargs.pop('''text_queries''' )
if isinstance(lowercase__ ,(str, Image.Image) ):
__lowercase = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__lowercase = image
__lowercase = super().__call__(lowercase__ ,**lowercase__ )
return results
def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ):
__lowercase = {}
if "threshold" in kwargs:
__lowercase = kwargs['''threshold''']
if "top_k" in kwargs:
__lowercase = kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
__lowercase = load_image(inputs['''image'''] )
__lowercase = inputs['''candidate_labels''']
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = candidate_labels.split(''',''' )
__lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(lowercase__ ):
__lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework )
__lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework )
yield {
"is_last": i == len(lowercase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
__lowercase = model_inputs.pop('''target_size''' )
__lowercase = model_inputs.pop('''candidate_label''' )
__lowercase = model_inputs.pop('''is_last''' )
__lowercase = self.model(**lowercase__ )
__lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ):
__lowercase = []
for model_output in model_outputs:
__lowercase = model_output['''candidate_label''']
__lowercase = BaseModelOutput(lowercase__ )
__lowercase = self.image_processor.post_process_object_detection(
outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
__lowercase = outputs['''scores'''][index].item()
__lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] )
__lowercase = {'''score''': score, '''label''': label, '''box''': box}
results.append(lowercase__ )
        __lowercase = sorted(lowercase__ ,key=lambda x : x["score"] ,reverse=lowercase__ )
if top_k:
__lowercase = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
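if __name__ == "__main__":
    # A hedged usage sketch for the pipeline above; the OWL-ViT checkpoint and the
    # COCO image URL are illustrative choices, not requirements of the class.
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    # Each prediction looks like {"score": ..., "label": ..., "box": {"xmin": ..., ...}}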
| 41 | 1 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = F"{sampling_rate}"
__lowercase = '''1'''
__lowercase = '''f32le'''
__lowercase = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(A__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowercase = ffmpeg_process.communicate(A__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
__lowercase = output_stream[0]
__lowercase = np.frombuffer(A__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def _A ( A__ , A__ , A__ = "f32le" , ):
"""simple docstring"""
__lowercase = F"{sampling_rate}"
__lowercase = '''1'''
if format_for_conversion == "s16le":
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
__lowercase = platform.system()
if system == "Linux":
__lowercase = '''alsa'''
__lowercase = '''default'''
elif system == "Darwin":
__lowercase = '''avfoundation'''
__lowercase = ''':0'''
elif system == "Windows":
__lowercase = '''dshow'''
__lowercase = '''default'''
__lowercase = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowercase = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def _A ( A__ , A__ , A__ = None , A__ = None , A__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
__lowercase = stream_chunk_s
else:
__lowercase = chunk_length_s
__lowercase = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
__lowercase = np.intaa
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = np.floataa
__lowercase = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
__lowercase = chunk_length_s / 6
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
__lowercase = [stride_length_s, stride_length_s]
__lowercase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowercase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowercase = datetime.datetime.now()
__lowercase = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
__lowercase = np.frombuffer(item['''raw'''] , dtype=A__ )
__lowercase = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__lowercase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _A ( A__ , A__ , A__ , A__ = False ):
"""simple docstring"""
__lowercase = b''''''
__lowercase , __lowercase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
__lowercase = 0
for raw in iterator:
acc += raw
if stream and len(A__ ) < chunk_len:
__lowercase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(A__ ) >= chunk_len:
# We are flushing the accumulator
__lowercase = (_stride_left, stride_right)
__lowercase = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__lowercase = False
yield item
__lowercase = stride_left
__lowercase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(A__ ) > stride_left:
__lowercase = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__lowercase = False
yield item
def _A ( A__ , A__ ):
"""simple docstring"""
    __lowercase = 2**24 # 16 MB
try:
with subprocess.Popen(A__ , stdout=subprocess.PIPE , bufsize=A__ ) as ffmpeg_process:
while True:
__lowercase = ffmpeg_process.stdout.read(A__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
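# A hedged usage sketch; in the upstream transformers source these helpers are
# named `ffmpeg_read`, `ffmpeg_microphone` and `ffmpeg_microphone_live`, and the
# file path and `handle` callback below are placeholders:
#
#   with open("speech.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)  # mono float32 array
#
#   for chunk in ffmpeg_microphone_live(16_000, chunk_length_s=2.0):
#       handle(chunk["raw"], stride=chunk["stride"])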
| 41 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli'
SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
SCREAMING_SNAKE_CASE : Any = 'text_classifier'
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']]
SCREAMING_SNAKE_CASE : List[str] = ['text']
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setup()
__lowercase = self.model.config
__lowercase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__lowercase = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ):
__lowercase = labels
return self.pre_processor(
[text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = outputs.logits
__lowercase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
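# A hedged sketch of calling the tool above; upstream it is exposed as
# `transformers.tools.TextClassificationTool`, and the import path and example
# labels are assumptions of this sketch:
#
#   from transformers.tools import TextClassificationTool
#
#   classifier = TextClassificationTool()
#   classifier("This new movie is awesome", labels=["positive", "negative"])
#   # -> 'positive'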
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
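# A hedged sketch of what the `_LazyModule` indirection above buys: the heavy
# submodules are only imported on first attribute access, e.g.
#
#   from transformers import SpeechT5Processor   # resolved through the lazy module
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")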
| 41 |
'''simple docstring'''
from collections.abc import Callable
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ):
# Stores actual heap items.
__lowercase = []
# Stores indexes of each item for supporting updates and deletion.
__lowercase = {}
# Stores current size of heap.
__lowercase = 0
        # Stores the function used to evaluate the score of an item; the heap
        # is ordered by this score.
        __lowercase = key or (lambda x : x)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
return int((i - 1) / 2 ) if i > 0 else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
__lowercase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ):
__lowercase , __lowercase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__lowercase , __lowercase = self.arr[j], self.arr[i]
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
return self.arr[i][1] < self.arr[j][1]
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._left(lowercase__ )
__lowercase = self._right(lowercase__ )
__lowercase = i
if left is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = left
if right is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = right
return valid_parent
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._parent(lowercase__ )
while parent is not None and not self._cmp(lowercase__ ,lowercase__ ):
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = parent, self._parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = self._get_valid_parent(lowercase__ )
while valid_parent != index:
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
__lowercase = [item, self.key(lowercase__ )]
        # Make sure the heap is valid in both the up and down directions.
        # Ideally only one of the two calls will make any change.
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
del self.pos_map[item]
__lowercase = self.arr[self.size - 1]
__lowercase = index
self.size -= 1
        # Make sure the heap is valid in both the up and down directions. Ideally
        # only one of the two calls will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase__ )] )
else:
__lowercase = [item, self.key(lowercase__ )]
__lowercase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.arr[0] if self.size else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
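# A hedged usage sketch against the upstream method names (insert_item,
# update_item, delete_item, get_top, extract_top) that the collapsed
# definitions above correspond to:
#
#   heap = Heap(key=lambda x: -x)   # max-heap by negating the score
#   heap.insert_item("a", 3)
#   heap.insert_item("b", 7)
#   heap.get_top()                  # -> ["b", -7]
#   heap.extract_top()              # pops and returns the top entry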
| 41 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizerFast
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = {'add_prefix_space': True}
SCREAMING_SNAKE_CASE : Optional[int] = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__lowercase = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) )
__lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase = {'''unk_token''': '''<unk>'''}
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,**lowercase__ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,**lowercase__ : str ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
__lowercase = '''lower newer'''
__lowercase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
__lowercase = '''lower newer'''
__lowercase = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowercase = tokenizer.tokenize(lowercase__ ,add_prefix_space=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
__lowercase = '''lower newer'''
# Testing tokenization
__lowercase = tokenizer.tokenize(lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing conversion to ids without special tokens
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing conversion to ids with special tokens
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing the unknown token
__lowercase = tokens + [rust_tokenizer.unk_token]
__lowercase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,*lowercase__ : Optional[int] ,**lowercase__ : int ):
        # It's very difficult to mix/test pretokenization with byte-level BPE and get
        # both GPT-2 and RoBERTa to work at the same time (mostly an issue of adding a space before the string)
pass
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase__ ,tokenizer_r.encode ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Simple input
self.assertRaises(lowercase__ ,tokenizer_r.encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase__ ,tokenizer_r.batch_encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' ,)
# Pair input
self.assertRaises(lowercase__ ,tokenizer_r.encode ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Pair input
self.assertRaises(lowercase__ ,tokenizer_r.encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase__ ,tokenizer_r.batch_encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input looooooooong''', '''This is a simple input''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer(lowercase__ ,padding='''max_length''' ,max_length=3_0 ,return_tensors='''np''' )
__lowercase = tokenizer(lowercase__ ,padding=lowercase__ ,truncate=lowercase__ ,return_tensors='''np''' )
__lowercase = tokenizer(*lowercase__ ,padding='''max_length''' ,max_length=6_0 ,return_tensors='''np''' )
__lowercase = tokenizer(lowercase__ ,padding=lowercase__ ,truncate=lowercase__ ,return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = '''$$$'''
__lowercase = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowercase__ ,add_bos_token=lowercase__ )
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer(lowercase__ )
__lowercase = tokenizer(lowercase__ )
self.assertEqual(out_s.input_ids[0] ,lowercase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowercase = tokenizer.decode(out_s.input_ids )
__lowercase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowercase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
__lowercase = [self.get_tokenizer(do_lower_case=lowercase__ ,add_bos_token=lowercase__ )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = '''Encode this.'''
__lowercase = '''This one too please.'''
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
encoded_sequence += tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = tokenizer.encode_plus(
lowercase__ ,lowercase__ ,add_special_tokens=lowercase__ ,return_special_tokens_mask=lowercase__ ,)
__lowercase = encoded_sequence_dict['''input_ids''']
__lowercase = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
__lowercase = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase__ )
]
__lowercase = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowercase__ ,lowercase__ )
@require_tokenizers
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Dict ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__lowercase = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=lowercase__ )
__lowercase = '''A photo of a cat'''
__lowercase = tokenizer.encode(
lowercase__ ,)
self.assertEqual(lowercase__ ,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''test_opt''' )
__lowercase = AutoTokenizer.from_pretrained('''./test_opt''' )
__lowercase = tokenizer.encode(
lowercase__ ,)
self.assertEqual(lowercase__ ,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,use_slow=lowercase__ )
__lowercase = '''A photo of a cat'''
__lowercase = tokenizer.encode(
lowercase__ ,)
# Same as above
self.assertEqual(lowercase__ ,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=lowercase__ )
__lowercase = '''bos'''
__lowercase = tokenizer.get_vocab()['''bos''']
__lowercase = '''A photo of a cat'''
__lowercase = tokenizer.encode(
lowercase__ ,)
# We changed the bos token
self.assertEqual(lowercase__ ,[3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''./tok''' )
__lowercase = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
__lowercase = tokenizer.encode(
lowercase__ ,)
self.assertEqual(lowercase__ ,[3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
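if __name__ == "__main__":
    # A hedged sketch of the byte-level behaviour the tests above exercise: a
    # leading space is folded into the token as the '\u0120' marker ('Ġ').
    from transformers import GPT2Tokenizer

    tok = GPT2Tokenizer.from_pretrained("gpt2")
    print(tok.tokenize("lower newer", add_prefix_space=True))
    # word-initial tokens start with 'Ġ' (\u0120)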
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
        __lowercase = sorted(lowercase__ ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
        __lowercase = sorted(lowercase__ ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
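# A hedged sketch of a user-defined callback like the test double above; the
# hook signature (args, state, control, **kwargs) is the real TrainerCallback
# API, while the class name below is hypothetical.
class EpochPrinterCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")

# Used as: Trainer(model, training_args, callbacks=[EpochPrinterCallback()])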
| 41 | 1 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = os.path.abspath(A__ )
logger.info(F"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__lowercase = tf.train.list_variables(A__ )
__lowercase = []
__lowercase = []
__lowercase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__lowercase = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(F"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__lowercase = name[1:]
# figure out how many levels deep the name is
__lowercase = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(A__ )
# read data
__lowercase = tf.train.load_variable(A__ , A__ )
names.append('''/'''.join(A__ ) )
arrays.append(A__ )
logger.info(F"Read a total of {len(A__ ):,} layers" )
# Sanity check
if len(set(A__ ) ) != 1:
raise ValueError(F"Found layer names with different depths (layer depth {list(set(A__ ) )})" )
__lowercase = list(set(A__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(A__ , A__ ):
__lowercase = full_name.split('''/''' )
__lowercase = model
__lowercase = []
for i, m_name in enumerate(A__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__lowercase = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__lowercase = getattr(A__ , '''embeddings''' )
__lowercase = getattr(A__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__lowercase = getattr(A__ , '''encoder''' )
__lowercase = getattr(A__ , '''layer''' )
__lowercase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__lowercase = getattr(A__ , '''pooler''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__lowercase = getattr(A__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__lowercase = getattr(A__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__lowercase = getattr(A__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__lowercase = getattr(A__ , '''token_type_embeddings''' )
else:
raise ValueError(F"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__lowercase = getattr(A__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__lowercase = getattr(A__ , '''attention''' )
__lowercase = getattr(A__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__lowercase = getattr(A__ , '''attention''' )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__lowercase = getattr(A__ , '''attention''' )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__lowercase = getattr(A__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__lowercase = getattr(A__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__lowercase = getattr(A__ , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
__lowercase = getattr(A__ , '''intermediate''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
__lowercase = getattr(A__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__lowercase = getattr(A__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__lowercase = getattr(A__ , '''weight''' )
else:
logger.warning(F"Ignored {m_name}" )
# for certain layers reshape is necessary
__lowercase = '''.'''.join(A__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , A__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , A__ ):
__lowercase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__lowercase = array.transpose()
if pointer.shape == array.shape:
__lowercase = torch.from_numpy(A__ )
else:
raise ValueError(
F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
F" {array.shape}" )
logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
logger.info(F"Loading model based on config from {config_path}..." )
__lowercase = BertConfig.from_json_file(A__ )
__lowercase = BertModel(A__ )
# Load weights from checkpoint
logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(A__ , A__ , A__ )
# Save pytorch-model
logger.info(F"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
lowerCAmelCase__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
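# A hedged example invocation (the script name matches the upstream converter;
# all paths are placeholders):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#       --bert_config_file ./tf2_ckpt/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin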
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
        # 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
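if __name__ == "__main__":
    # Hedged, standalone sketch (not part of the original model) of the init
    # pattern used in the first method above: split one PRNG key into "params"
    # and "dropout" streams and call `init` with dummy inputs. `TinyNet` is a
    # hypothetical module used purely for illustration.
    import jax
    import jax.numpy as jnp
    import flax.linen as nn

    class TinyNet(nn.Module):
        @nn.compact
        def __call__(self, x):
            return nn.Dense(4)(x)

    params_rng, dropout_rng = jax.random.split(jax.random.PRNGKey(0))
    rngs = {"params": params_rng, "dropout": dropout_rng}
    params = TinyNet().init(rngs, jnp.zeros((1, 8)))["params"]
    print(jax.tree_util.tree_map(jnp.shape, params))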
| 41 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 'mctct'
def __init__( self : Any ,lowercase__ : Any=8_0_6_5 ,lowercase__ : Optional[Any]=1_5_3_6 ,lowercase__ : Dict=3_6 ,lowercase__ : List[Any]=6_1_4_4 ,lowercase__ : Any=4 ,lowercase__ : Tuple=3_8_4 ,lowercase__ : Tuple=9_2_0 ,lowercase__ : List[Any]=1e-5 ,lowercase__ : Optional[Any]=0.3 ,lowercase__ : str="relu" ,lowercase__ : Optional[Any]=0.0_2 ,lowercase__ : List[str]=0.3 ,lowercase__ : Any=0.3 ,lowercase__ : Optional[int]=1 ,lowercase__ : Optional[Any]=0 ,lowercase__ : Dict=2 ,lowercase__ : int=1 ,lowercase__ : Optional[Any]=0.3 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : Optional[int]=(7,) ,lowercase__ : Optional[int]=(3,) ,lowercase__ : Dict=8_0 ,lowercase__ : List[Any]=1 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple="sum" ,lowercase__ : Optional[int]=False ,**lowercase__ : Dict ,):
super().__init__(**lowercase__ ,pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = num_attention_heads
__lowercase = attention_head_dim
__lowercase = max_position_embeddings
__lowercase = layer_norm_eps
__lowercase = layerdrop
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = eos_token_id
__lowercase = conv_glu_dim
__lowercase = conv_dropout
__lowercase = num_conv_layers
__lowercase = input_feat_per_channel
__lowercase = input_channels
__lowercase = conv_channels
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# prevents config testing fail with exporting to json
__lowercase = list(lowercase__ )
__lowercase = list(lowercase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
        # maximize scale s.t. the scaled image produces at most max_patches patches
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
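if __name__ == "__main__":
    # Hedged, standalone sketch of the unfold-based patch extraction used above:
    # a 1x1x4x4 image cut into 2x2 patches yields a (rows, cols, depth) = (2, 2, 4)
    # grid of flattened patches, mirroring the reshape/permute sequence of the
    # patch-extraction helper. Illustrative values only.
    import torch

    image = torch.arange(16.0).reshape(1, 1, 4, 4)
    patches = torch.nn.functional.unfold(image, (2, 2), stride=(2, 2))  # (1, 4, 4)
    patches = patches.reshape(1, 1, 2, 2, -1).permute(0, 4, 2, 3, 1).reshape(2, 2, 4)
    print(patches.shape)  # torch.Size([2, 2, 4])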
| 41 | 1 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCAmelCase__ = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
lowerCAmelCase__ = None
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=A__ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=A__ , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _A ( A__ ):
"""simple docstring"""
__lowercase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowercase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def _A ( A__ ):
"""simple docstring"""
def remove_articles(A__ ):
return ARTICLES_REGEX.sub(''' ''' , A__ )
def white_space_fix(A__ ):
return " ".join(text.split() )
def remove_punc(A__ ):
__lowercase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) )
def _A ( A__ ):
"""simple docstring"""
if not s:
return []
return normalize_answer(A__ ).split()
def _A ( A__ , A__ ):
"""simple docstring"""
return int(normalize_answer(A__ ) == normalize_answer(A__ ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = get_tokens(A__ )
__lowercase = get_tokens(A__ )
__lowercase = collections.Counter(A__ ) & collections.Counter(A__ )
__lowercase = sum(common.values() )
if len(A__ ) == 0 or len(A__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowercase = 1.0 * num_same / len(A__ )
__lowercase = 1.0 * num_same / len(A__ )
__lowercase = (2 * precision * recall) / (precision + recall)
return fa
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = {}
__lowercase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowercase = qa['''id''']
__lowercase = [t for t in qa['''answers''']['''text'''] if normalize_answer(A__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowercase = ['''''']
if qid not in preds:
print(F"Missing prediction for {qid}" )
continue
__lowercase = preds[qid]
# Take max over all gold answers
__lowercase = max(compute_exact(A__ , A__ ) for a in gold_answers )
__lowercase = max(compute_fa(A__ , A__ ) for a in gold_answers )
return exact_scores, fa_scores
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = {}
for qid, s in scores.items():
__lowercase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowercase = float(not qid_to_has_ans[qid] )
else:
__lowercase = s
return new_scores
def _A ( A__ , A__ , A__=None ):
"""simple docstring"""
if not qid_list:
__lowercase = len(A__ )
return collections.OrderedDict(
[
('''exact''', 1_0_0.0 * sum(exact_scores.values() ) / total),
('''f1''', 1_0_0.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowercase = len(A__ )
return collections.OrderedDict(
[
('''exact''', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for k in new_eval:
__lowercase = new_eval[k]
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
plt.step(A__ , A__ , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(A__ , A__ , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(A__ )
plt.savefig(A__ )
plt.clf()
def _A ( A__ , A__ , A__ , A__ , A__=None , A__=None ):
"""simple docstring"""
__lowercase = sorted(A__ , key=lambda A__ : na_probs[k] )
__lowercase = 0.0
__lowercase = 1.0
__lowercase = 0.0
__lowercase = [1.0]
__lowercase = [0.0]
__lowercase = 0.0
for i, qid in enumerate(A__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowercase = true_pos / float(i + 1 )
__lowercase = true_pos / float(A__ )
if i == len(A__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(A__ )
recalls.append(A__ )
if out_image:
plot_pr_curve(A__ , A__ , A__ , A__ )
return {"ap": 1_0_0.0 * avg_prec}
def _A ( A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
if out_image_dir and not os.path.exists(A__ ):
os.makedirs(A__ )
__lowercase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowercase = make_precision_recall_eval(
A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowercase = make_precision_recall_eval(
A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowercase = {k: float(A__ ) for k, v in qid_to_has_ans.items()}
__lowercase = make_precision_recall_eval(
A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(A__ , A__ , '''pr_exact''' )
merge_eval(A__ , A__ , '''pr_f1''' )
merge_eval(A__ , A__ , '''pr_oracle''' )
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
if not qid_list:
return
__lowercase = [na_probs[k] for k in qid_list]
__lowercase = np.ones_like(A__ ) / float(len(A__ ) )
plt.hist(A__ , weights=A__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(F"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(A__ , F"na_prob_hist_{name}.png" ) )
plt.clf()
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowercase = num_no_ans
__lowercase = cur_score
__lowercase = 0.0
__lowercase = sorted(A__ , key=lambda A__ : na_probs[k] )
for i, qid in enumerate(A__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowercase = scores[qid]
else:
if preds[qid]:
__lowercase = -1
else:
__lowercase = 0
cur_score += diff
if cur_score > best_score:
__lowercase = cur_score
__lowercase = na_probs[qid]
return 1_0_0.0 * best_score / len(A__ ), best_thresh
def _A ( A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase = find_best_thresh(A__ , A__ , A__ , A__ )
__lowercase , __lowercase = find_best_thresh(A__ , A__ , A__ , A__ )
__lowercase = best_exact
__lowercase = exact_thresh
__lowercase = best_fa
__lowercase = fa_thresh
def _A ( ):
"""simple docstring"""
with open(OPTS.data_file ) as f:
__lowercase = json.load(A__ )
__lowercase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowercase = json.load(A__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowercase = json.load(A__ )
else:
__lowercase = {k: 0.0 for k in preds}
__lowercase = make_qid_to_has_ans(A__ ) # maps qid to True/False
__lowercase = [k for k, v in qid_to_has_ans.items() if v]
__lowercase = [k for k, v in qid_to_has_ans.items() if not v]
__lowercase , __lowercase = get_raw_scores(A__ , A__ )
__lowercase = apply_no_ans_threshold(A__ , A__ , A__ , OPTS.na_prob_thresh )
__lowercase = apply_no_ans_threshold(A__ , A__ , A__ , OPTS.na_prob_thresh )
__lowercase = make_eval_dict(A__ , A__ )
if has_ans_qids:
__lowercase = make_eval_dict(A__ , A__ , qid_list=A__ )
merge_eval(A__ , A__ , '''HasAns''' )
if no_ans_qids:
__lowercase = make_eval_dict(A__ , A__ , qid_list=A__ )
merge_eval(A__ , A__ , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(A__ , A__ , A__ , A__ , A__ , A__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(A__ , A__ , A__ , A__ , A__ , OPTS.out_image_dir )
histogram_na_prob(A__ , A__ , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(A__ , A__ , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(A__ , A__ )
else:
print(json.dumps(A__ , indent=2 ) )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
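if __name__ == "__main__":
    # Hedged, standalone sketch of the token-overlap F1 computed above:
    # precision and recall over the multiset intersection of gold and
    # predicted tokens. Illustrative values only.
    gold, pred = "the cat sat".split(), "the cat".split()
    num_same = sum((collections.Counter(gold) & collections.Counter(pred)).values())
    precision, recall = num_same / len(pred), num_same / len(gold)
    print(2 * precision * recall / (precision + recall))  # ~0.8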
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
        # fill the smaller signal with zeros so that both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
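if __name__ == "__main__":
    # Hedged cross-check (illustrative): circular convolution via the DFT
    # agrees with the circulant-matrix approach implemented above for the
    # same default signals, giving [10. 10.  6. 14.].
    signal_a, signal_b = np.array([2, 1, 2, -1]), np.array([1, 2, 3, 4])
    conv = np.fft.ifft(np.fft.fft(signal_a) * np.fft.fft(signal_b))
    print(np.round(np.real(conv), 2))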
| 41 | 1 |
'''simple docstring'''
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
if len(A__ ) != len(A__ ):
        raise ValueError('''The length of profit and weight must be the same.''' )
if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
    # Build the profit gained per unit weight (profit/weight) for each item.
__lowercase = [p / w for p, w in zip(A__ , A__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(A__ )
# declaring useful variables
__lowercase = len(A__ )
__lowercase = 0
__lowercase = 0
__lowercase = 0
    # Loop until the capacity limit (e.g. 15 kg) is reached or every item has been considered.
while limit <= max_weight and i < length:
        # Pick the largest remaining profit/weight ratio from the sorted list.
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(A__ )
__lowercase = -1
        # Check whether the whole item fits within the remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: the fraction weight[index]/weight[index] == 1,
            # so the full profit is added.
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so take only the remaining capacity
            # and add the proportional share of its profit:
            # (max_weight - limit) / weight[index] * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
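if __name__ == "__main__":
    # Hedged, standalone sketch of the greedy rule implemented above: sort items
    # by profit/weight and take a fraction of the last item once capacity runs
    # out. With profits [10, 9, 8], weights [5, 4, 3] and capacity 7, gain is 17.0.
    items = sorted(zip([10, 9, 8], [5, 4, 3]), key=lambda pw: pw[0] / pw[1], reverse=True)
    capacity, total_gain = 7, 0.0
    for p, w in items:
        taken = min(w, capacity)
        total_gain += p * taken / w
        capacity -= taken
    print(total_gain)  # 17.0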
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
lowerCAmelCase__ = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
lowerCAmelCase__ = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
lowerCAmelCase__ = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
def _A ( ):
"""simple docstring"""
__lowercase = Node(1 )
__lowercase = Node(2 )
__lowercase = Node(3 )
__lowercase = Node(4 )
__lowercase = Node(5 )
return tree
def _A ( A__ ):
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _A ( A__ ):
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _A ( A__ ):
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _A ( A__ ):
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
if root is None:
return output
__lowercase = deque([root] )
while process_queue:
__lowercase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
def populate_output(A__ , A__ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(A__ , A__ )
return output
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
def populate_output(A__ , A__ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(A__ , A__ )
return output
def _A ( A__ ):
"""simple docstring"""
if root is None:
return []
__lowercase = []
__lowercase = 0
__lowercase = height(A__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(A__ , A__ ) )
__lowercase = 1
else:
output.append(get_nodes_from_right_to_left(A__ , A__ ) )
__lowercase = 0
return output
def _A ( ): # Main function for testing.
"""simple docstring"""
__lowercase = make_tree()
print(F"In-order Traversal: {inorder(A__ )}" )
print(F"Pre-order Traversal: {preorder(A__ )}" )
print(F"Post-order Traversal: {postorder(A__ )}" , '''\n''' )
print(F"Height of Tree: {height(A__ )}" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(A__ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(A__ ) + 1 ):
print(F"Level {level}:" , get_nodes_from_left_to_right(A__ , level=A__ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A__ ):
print(F"{i}\t\t{d}" )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [float('''inf''' )] * vertex_count
__lowercase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__lowercase = distance[u] + w
__lowercase = check_negative_cycle(A__ , A__ , A__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
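if __name__ == "__main__":
    # Hedged, standalone sketch of the relaxation loop above on a 3-vertex graph
    # with edges 0->1 (4), 0->2 (7) and 1->2 (1); distances from vertex 0 come
    # out as [0.0, 4.0, 5.0].
    edges = [(0, 1, 4.0), (0, 2, 7.0), (1, 2, 1.0)]
    dist = [float("inf")] * 3
    dist[0] = 0.0
    for _ in range(2):  # |V| - 1 relaxation passes
        for u, v, w in edges:
            if dist[u] != float("inf") and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
    print(dist)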
| 41 | 1 |
'''simple docstring'''
from collections.abc import Callable
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ):
# Stores actual heap items.
__lowercase = []
# Stores indexes of each item for supporting updates and deletion.
__lowercase = {}
# Stores current size of heap.
__lowercase = 0
        # Stores the function used to compute an item's score, which determines
        # the heap ordering.
        __lowercase = key or (lambda lowercase__ : lowercase__)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
return int((i - 1) / 2 ) if i > 0 else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
__lowercase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ):
__lowercase , __lowercase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__lowercase , __lowercase = self.arr[j], self.arr[i]
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
return self.arr[i][1] < self.arr[j][1]
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._left(lowercase__ )
__lowercase = self._right(lowercase__ )
__lowercase = i
if left is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = left
if right is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = right
return valid_parent
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._parent(lowercase__ )
while parent is not None and not self._cmp(lowercase__ ,lowercase__ ):
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = parent, self._parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = self._get_valid_parent(lowercase__ )
while valid_parent != index:
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
__lowercase = [item, self.key(lowercase__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
del self.pos_map[item]
__lowercase = self.arr[self.size - 1]
__lowercase = index
self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase__ )] )
else:
__lowercase = [item, self.key(lowercase__ )]
__lowercase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.arr[0] if self.size else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
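if __name__ == "__main__":
    # Hedged, standalone check of the index arithmetic used by the heap above:
    # for a heap stored in a flat list, node i has children at 2*i + 1 and
    # 2*i + 2, and both map back to i via (child - 1) // 2.
    i = 2
    left, right = 2 * i + 1, 2 * i + 2
    assert (left - 1) // 2 == i and (right - 1) // 2 == i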
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ , A__ , A__ , A__ , ):
"""simple docstring"""
__lowercase = len(A__ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(A__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
        # 45º:  y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , A__ , A__ , )
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
depth_first_search([] , [] , [] , A__ , A__ )
# Print all the boards
for board in boards:
for column in board:
print(A__ )
print('''''' )
print(len(A__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
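if __name__ == "__main__":
    # Hedged, standalone check of the diagonal formulas used above: queens at
    # (1, 3) and (3, 1) collide on the 135º diagonal (equal row + col) but not
    # on the 45º diagonal (row - col differs).
    assert 1 + 3 == 3 + 1
    assert (1 - 3) != (3 - 1)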
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = 0.0_1
with locka.acquire():
with pytest.raises(A__ ):
__lowercase = time.time()
locka.acquire(A__ )
assert time.time() - _start > timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = '''a''' * 1000 + '''.lock'''
__lowercase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(A__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__lowercase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(A__ ):
locka.acquire(0 )
| 41 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCAmelCase__ = pd.read_csv('''sample_data.csv''', header=None)
lowerCAmelCase__ = df.shape[:1][0]
# If you're using some other dataset input the target column
lowerCAmelCase__ = df.iloc[:, 1:2]
lowerCAmelCase__ = actual_data.values.reshape(len_data, 1)
lowerCAmelCase__ = MinMaxScaler().fit_transform(actual_data)
lowerCAmelCase__ = 10
lowerCAmelCase__ = 5
lowerCAmelCase__ = 20
lowerCAmelCase__ = len_data - periods * look_back
lowerCAmelCase__ = actual_data[:division]
lowerCAmelCase__ = actual_data[division - look_back :]
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ , lowerCAmelCase__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCAmelCase__ = np.array(train_x)
lowerCAmelCase__ = np.array(test_x)
lowerCAmelCase__ = np.array([list(i.ravel()) for i in train_y])
lowerCAmelCase__ = np.array([list(i.ravel()) for i in test_y])
lowerCAmelCase__ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
lowerCAmelCase__ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
lowerCAmelCase__ = model.predict(x_test)
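if __name__ == "__main__":
    # Hedged, standalone sketch of the sliding-window construction above:
    # look_back consecutive values form the input and the next forward_days
    # values form the target.
    series = np.arange(10)
    lb, fd = 3, 2
    windows = [series[i : i + lb] for i in range(len(series) - lb - fd + 1)]
    targets = [series[i + lb : i + lb + fd] for i in range(len(windows))]
    print(len(windows), windows[0].tolist(), targets[0].tolist())  # 6 [0, 1, 2] [3, 4]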
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _A ( ):
"""simple docstring"""
__lowercase = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=A__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=A__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=A__ )
return parser.parse_args()
def _A ( ):
"""simple docstring"""
__lowercase = parse_args()
# Import training_script as a module.
__lowercase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowercase = script_fpath.stem
__lowercase = importlib.import_module(A__ )
# Patch sys.argv
__lowercase = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 41 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''')
def _A ( A__ ):
"""simple docstring"""
__lowercase = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _A ( A__ ):
"""simple docstring"""
def _inner(A__ ):
return key(A__ ).lower().replace('''_''' , '''''' )
return _inner
def _A ( A__ , A__=None ):
"""simple docstring"""
def noop(A__ ):
return x
if key is None:
__lowercase = noop
# Constants are all uppercase, they go first.
__lowercase = [obj for obj in objects if key(A__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()]
# Functions begin with a lowercase, they go last.
__lowercase = [obj for obj in objects if not key(A__ )[0].isupper()]
__lowercase = ignore_underscore(A__ )
return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ )
def _A ( A__ ):
"""simple docstring"""
def _replace(A__ ):
__lowercase = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
__lowercase = import_statement.split('''\n''' )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowercase = 2 if lines[1].strip() == '''[''' else 1
__lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__lowercase = sort_objects(A__ , key=lambda A__ : x[1] )
__lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
__lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
__lowercase = _re_bracket_content.sub(_replace , A__ )
return import_statement
def sort_imports(file, check_only=True):
    """Sorts the `_import_structure` of a given init file. Returns True if the file would change."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Runs `sort_imports` on every `__init__.py` and collects the files that would change."""
    failures = []
    # PATH_TO_TRANSFORMERS is assumed to be defined earlier in this script (e.g. "src/transformers").
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
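# Typical usage (the script path is an assumption; in the transformers repo this
# utility lives at utils/custom_init_isort.py):
#   python utils/custom_init_isort.py --check_only   # fail if any __init__.py needs sorting
#   python utils/custom_init_isort.py                # rewrite the offending files in place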
| 41 | 1 |
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    """Converts a hexadecimal string to its binary representation, returned as an int of 0/1 digits."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
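# Illustrative examples (hand-checked against the function above):
#   hex_to_bin("AC")  -> 10101100   (0xAC == 172 == 0b10101100)
#   hex_to_bin("-c")  -> -1100      (0xC  == 12  == 0b1100)
# Note: the binary digits are returned packed into a base-10 int, not as a numeric value.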
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
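# To run only the fast tests above (the file path is an assumption based on the
# diffusers test layout):
#   pytest tests/pipelines/text_to_video_synthesis/test_text_to_video.py
# The @slow tests additionally require a CUDA device and are only collected when
# the RUN_SLOW=1 environment variable is set.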
| 41 | 1 |