import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the accelerator memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
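
# Added note: both tests above are gated behind the `slow` decorator. In the diffusers
# test suite such tests are typically run with the RUN_SLOW environment variable on a
# machine with JAX accelerators, e.g. `RUN_SLOW=1 python -m pytest <path-to-this-test-file>`.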
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head (a single linear layer) on top of a transformer encoder."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
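
# Illustrative usage (added; the sizes below are arbitrary examples, not part of the original file):
#
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))  # -> shape (2, 5), one score per class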
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
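
# Worked example (added): with the 24 kHz defaults above, hop_length = 8 * 5 * 4 * 2 = 320,
# so frame_rate = ceil(24000 / 320) = 75 frames per second, and the maximum bandwidth of
# 24.0 kbps gives num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 codebooks.
#
#   from transformers import EncodecConfig  # assumes an installed `transformers`
#   config = EncodecConfig()
#   assert config.frame_rate == 75 and config.num_quantizers == 32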
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`)
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`)
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*)
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`)
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a `RagConfig` from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
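
# Illustrative composition (added; the two checkpoint names are examples, not part of this file):
#
#   from transformers import AutoConfig, RagConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#       AutoConfig.from_pretrained("facebook/bart-large"),
#   )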
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Stochastic sampling from Karras et al. [1] tailored to variance-expanding (VE) models."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from manim import *


class Stage5(Scene):
    # NOTE: identifiers, direction constants (UP/DOWN/LEFT/RIGHT) and colors below are
    # best-effort reconstructions; the anonymized source replaced them with placeholders.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            # a filled copy marks the checkpoint weights living inside each model block
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a_1 = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a_1.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a_1),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_1_dup = a_1.copy()
        for i in range(6):
            a_1_dup.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a_1, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_1_dup, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

            a_1 = a_1_dup
            a_1_dup = a_1_dup.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_1_dup, run_time=0.5),
        )

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> Dict:
if exponent == 1:
return base
if exponent % 2 == 0:
__a = _modexpt(SCREAMING_SNAKE_CASE_ , exponent // 2 , SCREAMING_SNAKE_CASE_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(SCREAMING_SNAKE_CASE_ , exponent - 1 , SCREAMING_SNAKE_CASE_ )) % modulo_value
def lowercase ( lowerCAmelCase__ : int = 1777 , lowerCAmelCase__ : int = 1855 , lowerCAmelCase__ : int = 8 ) -> Any:
__a = base
for _ in range(1 , SCREAMING_SNAKE_CASE_ ):
__a = _modexpt(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
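
# Added sanity check: for a base smaller than the modulus, _modexpt agrees with
# Python's built-in three-argument pow().
if __name__ == "__main__":
    for exponent in (2, 3, 10, 97):
        assert _modexpt(1777, exponent, 10**8) == pow(1777, exponent, 10**8)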
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
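
# Added note: with the `_LazyModule` wiring above, the torch-heavy submodule is only
# imported on first attribute access, e.g.:
#
#   from transformers import XLMRobertaXLConfig  # resolves through the lazy module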
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from collections import deque
class lowercase_ :
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = process_name # process name
_A = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_A = arrival_time
_A = burst_time # remaining burst time
_A = 0 # total time of the process wait in ready queue
_A = 0 # time from arrival time to completion time
class lowercase_ :
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : deque[Process] , _UpperCAmelCase : int , ):
# total number of mlfq's queues
_A = number_of_queues
# time slice of queues that round robin algorithm applied
_A = time_slices
# unfinished process is in this ready_queue
_A = queue
# current time
_A = current_time
# finished process is in this sequence queue
_A = deque()
def lowerCAmelCase_ ( self : Any ):
_A = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : list[Process] ):
_A = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : list[Process] ):
_A = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : list[Process] ):
_A = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : deque[Process] ):
return [q.burst_time for q in queue]
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : deque[Process] ):
_A = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
_A = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
_A = 0
# set the process's turnaround time because it is finished
_A = self.current_time - cp.arrival_time
# set the completion time
_A = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : deque[Process] , _UpperCAmelCase : int ):
_A = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
_A = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_A = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_A = 0
# set the finish time
_A = self.current_time
# update the process' turnaround time because it is finished
_A = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCAmelCase_ ( self : Tuple ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
_A , _A = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
a = Process('''P1''', 0, 53)
a = Process('''P2''', 0, 17)
a = Process('''P3''', 0, 68)
a = Process('''P4''', 0, 24)
a = 3
a = [17, 25]
a = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
a = Process('''P1''', 0, 53)
a = Process('''P2''', 0, 17)
a = Process('''P3''', 0, 68)
a = Process('''P4''', 0, 24)
a = 3
a = [17, 25]
a = deque([Pa, Pa, Pa, Pa])
a = MLFQ(number_of_queues, time_slices, queue, 0)
a = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 7 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a BiT (Big Transfer) backbone."""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
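
# Illustrative usage (added): the defaults above mirror a BiT-50 backbone.
#
#   from transformers import BitConfig
#   config = BitConfig()
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']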
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
SCREAMING_SNAKE_CASE_ : str = trt.Logger(trt.Logger.WARNING)
SCREAMING_SNAKE_CASE_ : List[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
SCREAMING_SNAKE_CASE_ : List[str] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
if args.tokenizer_name:
SCREAMING_SNAKE_CASE_ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
SCREAMING_SNAKE_CASE_ : List[str] = args.per_device_eval_batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Dict = 'temp_engine/bert-fp32.engine'
if args.fp16:
    SCREAMING_SNAKE_CASE_ : Any = 'temp_engine/bert-fp16.engine'
if args.int8:
    SCREAMING_SNAKE_CASE_ : int = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
SCREAMING_SNAKE_CASE_ : Any = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
SCREAMING_SNAKE_CASE_ : Tuple = [network.get_input(i) for i in range(network.num_inputs)]
SCREAMING_SNAKE_CASE_ : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
SCREAMING_SNAKE_CASE_ : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
SCREAMING_SNAKE_CASE_ : Dict = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
    A__ = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    A__ = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    A__ = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , UpperCAmelCase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , UpperCAmelCase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , UpperCAmelCase_ )
# start time
A__ = time.time()
# Run inference
context.execute_async(
bindings=[int(UpperCAmelCase_ ) for d_inp in d_inputs] + [int(UpperCAmelCase_ ), int(UpperCAmelCase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
cuda.memcpy_dtoh_async(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
A__ = time.time()
A__ = end_time - start_time
A__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE_ : int = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
SCREAMING_SNAKE_CASE_ : Tuple = raw_datasets['validation'].column_names
SCREAMING_SNAKE_CASE_ : Dict = 'question' if 'question' in column_names else column_names[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'context' if 'context' in column_names else column_names[1]
SCREAMING_SNAKE_CASE_ : List[str] = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
SCREAMING_SNAKE_CASE_ : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def _snake_case ( UpperCAmelCase_ : Tuple ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
A__ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
A__ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=UpperCAmelCase_ , stride=args.doc_stride , return_overflowing_tokens=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A__ = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A__ = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A__ = tokenized_examples.sequence_ids(UpperCAmelCase_ )
A__ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
SCREAMING_SNAKE_CASE_ : Union[str, Any] = raw_datasets['validation']
# Validation Feature Creation
SCREAMING_SNAKE_CASE_ : List[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
SCREAMING_SNAKE_CASE_ : str = default_data_collator
SCREAMING_SNAKE_CASE_ : Dict = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
SCREAMING_SNAKE_CASE_ : str = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
A__ = postprocess_qa_predictions(
examples=UpperCAmelCase_ , features=UpperCAmelCase_ , predictions=UpperCAmelCase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=UpperCAmelCase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A__ = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A__ = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A__ = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=UpperCAmelCase_ , label_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _snake_case ( UpperCAmelCase_ : List[Any] ):
return trt.volume(engine.get_binding_shape(UpperCAmelCase_ ) ) * engine.get_binding_dtype(UpperCAmelCase_ ).itemsize
# Allocate device memory for inputs and outputs.
SCREAMING_SNAKE_CASE_ : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    SCREAMING_SNAKE_CASE_ : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    SCREAMING_SNAKE_CASE_ : Tuple = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
SCREAMING_SNAKE_CASE_ : List[Any] = cuda.mem_alloc(h_outputa.nbytes)
SCREAMING_SNAKE_CASE_ : Any = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
SCREAMING_SNAKE_CASE_ : Tuple = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
SCREAMING_SNAKE_CASE_ : Any = 0.0
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Dict = timeit.default_timer()
SCREAMING_SNAKE_CASE_ : Dict = None
for step, batch in enumerate(eval_dataloader):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Optional[int] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Tuple = outputs
SCREAMING_SNAKE_CASE_ : int = torch.tensor(start_logits)
SCREAMING_SNAKE_CASE_ : int = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
SCREAMING_SNAKE_CASE_ : Optional[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
SCREAMING_SNAKE_CASE_ : int = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = nested_truncate(all_preds, len(eval_dataset))
SCREAMING_SNAKE_CASE_ : Optional[Any] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
SCREAMING_SNAKE_CASE_ : str = post_processing_function(eval_examples, eval_dataset, all_preds)
SCREAMING_SNAKE_CASE_ : Any = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 500 |
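# --- Added sketch (numpy only; function name is illustrative) ---
# The binding-size helper above boils down to "volume of the binding shape times
# the element size"; this standalone version shows the arithmetic without
# needing a TensorRT engine.
import numpy as np

def binding_nbytes_sketch(shape, dtype=np.dtype(np.float32)):
    return int(np.prod(shape)) * dtype.itemsize

print(binding_nbytes_sketch((8, 384)))  # 8 * 384 * 4 bytes = 12288, e.g. one logits buffer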
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCamelCase ( self: Any ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.dummy_uncond_unet
A__ = PNDMScheduler()
A__ = PNDMPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
pndm.to(UpperCamelCase )
pndm.set_progress_bar_config(disable=UpperCamelCase )
A__ = torch.manual_seed(0 )
A__ = pndm(generator=UpperCamelCase , num_inference_steps=20 , output_type="""numpy""" ).images
A__ = torch.manual_seed(0 )
A__ = pndm(generator=UpperCamelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCamelCase )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = """google/ddpm-cifar10-32"""
A__ = UNetaDModel.from_pretrained(UpperCamelCase )
A__ = PNDMScheduler()
A__ = PNDMPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
pndm.to(UpperCamelCase )
pndm.set_progress_bar_config(disable=UpperCamelCase )
A__ = torch.manual_seed(0 )
A__ = pndm(generator=UpperCamelCase , output_type="""numpy""" ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 500 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class a :
UpperCamelCase : str = BlenderbotConfig
UpperCamelCase : Optional[Any] = {}
UpperCamelCase : Optional[Any] = 'gelu'
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : Union[str, Any]=7 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : Any=2 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : List[str]=37 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Any=20 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : str=1 , lowerCAmelCase : str=0 , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =seq_length
SCREAMING_SNAKE_CASE_: Optional[int] =is_training
SCREAMING_SNAKE_CASE_: Any =use_labels
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: int =hidden_size
SCREAMING_SNAKE_CASE_: Any =num_hidden_layers
SCREAMING_SNAKE_CASE_: Tuple =num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[int] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_: str =eos_token_id
SCREAMING_SNAKE_CASE_: int =pad_token_id
SCREAMING_SNAKE_CASE_: Dict =bos_token_id
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE_: Optional[int] =tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE_: Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: int =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE_: Optional[int] =prepare_blenderbot_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =TFBlenderbotModel(config=lowerCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE_: Any =inputs_dict["""input_ids"""]
SCREAMING_SNAKE_CASE_: Union[str, Any] =input_ids[:1, :]
SCREAMING_SNAKE_CASE_: List[str] =inputs_dict["""attention_mask"""][:1, :]
SCREAMING_SNAKE_CASE_: Union[str, Any] =inputs_dict["""head_mask"""]
SCREAMING_SNAKE_CASE_: List[Any] =1
# first forward pass
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_: Optional[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE_: int =tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE_: int =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE_: List[Any] =model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: Tuple =model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE_: Optional[int] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE_: Tuple =output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE_: Optional[Any] =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
if attention_mask is None:
SCREAMING_SNAKE_CASE_: Tuple =tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_: Optional[Any] =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE_: Optional[Any] =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_: List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_: str =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Tuple = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCamelCase : int = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase : Dict = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase : List[str] = True
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Tuple = False
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =TFBlenderbotModelTester(self )
SCREAMING_SNAKE_CASE_: Tuple =ConfigTester(self , config_class=lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase )
@require_tokenizers
@require_tf
class a ( unittest.TestCase ):
UpperCamelCase : Union[str, Any] = ['My friends are cool but they eat too many carbs.']
UpperCamelCase : str = 'facebook/blenderbot-400M-distill'
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.tokenizer(self.src_text , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE_: Optional[int] =self.model.generate(
model_inputs.input_ids , )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 409 |
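# --- Added usage sketch mirroring the integration test above (downloads a
# large, roughly 1.5 GB TF checkpoint on first run; requires TensorFlow and
# `transformers`) ---
from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(name)
model = TFBlenderbotForConditionalGeneration.from_pretrained(name)
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
generated = model.generate(inputs.input_ids)
print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)[0])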
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class a ( unittest.TestCase ):
def __init__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=13 , lowerCAmelCase : List[Any]=7 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=99 , lowerCAmelCase : str=32 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : str=37 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : str=512 , lowerCAmelCase : Any=16 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Optional[Any]=0.0_2 , lowerCAmelCase : Dict=4 , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =parent
SCREAMING_SNAKE_CASE_: Dict =batch_size
SCREAMING_SNAKE_CASE_: str =seq_length
SCREAMING_SNAKE_CASE_: int =is_training
SCREAMING_SNAKE_CASE_: Dict =use_attention_mask
SCREAMING_SNAKE_CASE_: List[str] =use_token_type_ids
SCREAMING_SNAKE_CASE_: Union[str, Any] =use_labels
SCREAMING_SNAKE_CASE_: Any =vocab_size
SCREAMING_SNAKE_CASE_: Tuple =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Dict =num_attention_heads
SCREAMING_SNAKE_CASE_: str =intermediate_size
SCREAMING_SNAKE_CASE_: int =hidden_act
SCREAMING_SNAKE_CASE_: Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =max_position_embeddings
SCREAMING_SNAKE_CASE_: str =type_vocab_size
SCREAMING_SNAKE_CASE_: str =type_sequence_label_size
SCREAMING_SNAKE_CASE_: str =initializer_range
SCREAMING_SNAKE_CASE_: str =num_choices
def lowerCamelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Any =None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_: str =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_: Tuple =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCAmelCase , )
return config, input_ids, attention_mask
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =config_and_inputs
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =FlaxDistilBertModelTester(self )
@slow
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_: str =model_class_name.from_pretrained("""distilbert-base-uncased""" )
SCREAMING_SNAKE_CASE_: List[str] =model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase )
@require_flax
class a ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
SCREAMING_SNAKE_CASE_: Tuple =np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE_: Any =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: Union[str, Any] =(1, 11, 768)
self.assertEqual(output.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
| 409 | 1 |
'''simple docstring'''
import numpy as np
import datasets
lowerCAmelCase_ : Optional[int] = "\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
lowerCAmelCase_ : Optional[int] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
lowerCAmelCase_ : Union[str, Any] = "\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence") , id="X"),
}) , )
def __UpperCAmelCase ( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any]) -> int:
# convert to numpy arrays
lowercase_ = np.array(__lowerCAmelCase)
lowercase_ = np.array(__lowerCAmelCase)
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError("Expected `X` to be a 2D vector")
if len(reference_distribution.shape) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector")
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
# Get mahalanobis distance for each prediction
lowercase_ = X - np.mean(__lowerCAmelCase)
lowercase_ = np.cov(reference_distribution.T)
try:
lowercase_ = np.linalg.inv(__lowerCAmelCase)
except np.linalg.LinAlgError:
lowercase_ = np.linalg.pinv(__lowerCAmelCase)
lowercase_ = np.dot(__lowerCAmelCase , __lowerCAmelCase)
lowercase_ = np.dot(__lowerCAmelCase , X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
| 461 | '''simple docstring'''
def __a ( __lowerCamelCase : int = 600_851_475_143 ) -> int:
'''simple docstring'''
try:
lowercase_ = int(__lowerCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
lowercase_ = 1
lowercase_ = 2
while i * i <= n:
while n % i == 0:
lowercase_ = i
n //= i
i += 1
if n > 1:
lowercase_ = n
return int(__lowerCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 461 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Dict = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class A ( _a ):
lowercase_ = 'mra'
def __init__( self : int , lowerCAmelCase_ : Optional[int]=5_02_65 , lowerCAmelCase_ : Union[str, Any]=7_68 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Optional[Any]=30_72 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : List[str]=5_12 , lowerCAmelCase_ : Union[str, Any]=1 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : str=1e-5 , lowerCAmelCase_ : Optional[Any]="absolute" , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Tuple="full" , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=2 , **lowerCAmelCase_ : Optional[int] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = type_vocab_size
_a = layer_norm_eps
_a = position_embedding_type
_a = block_per_row
_a = approx_mode
_a = initial_prior_first_n_blocks
_a = initial_prior_diagonal_n_blocks
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415},\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\nWIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def snake_case_ (UpperCamelCase : Tuple ):
'''simple docstring'''
def remove_articles(UpperCamelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase )
def white_space_fix(UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase : str ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) )
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
_a = deltmpscorea / len(UpperCamelCase )
# ADDITION
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = set(UpperCamelCase ) & set(UpperCamelCase )
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = len(UpperCamelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
_a = sari_score / len(UpperCamelCase )
return 100 * sari_score
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
'''simple docstring'''
_a = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
_a = sacrebleu.corpus_bleu(
UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
| 22 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE_ = BitConfig(
conv_layer=__a, num_labels=1_000, idalabel=__a, labelaid=__a, )
return config
def _lowerCamelCase ( __a ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''stem.conv''', '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head.fc''', '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE_ = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE_ = '''bit.encoder.''' + name
return name
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( __a, __a, __a=False ):
SCREAMING_SNAKE_CASE_ = get_config(__a )
# load original model from timm
SCREAMING_SNAKE_CASE_ = create_model(__a, pretrained=__a )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = BitForImageClassification(__a )
model.eval()
model.load_state_dict(__a )
# create image processor
SCREAMING_SNAKE_CASE_ = create_transform(**resolve_data_config({}, model=__a ) )
SCREAMING_SNAKE_CASE_ = transform.transforms
SCREAMING_SNAKE_CASE_ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__a, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__a, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=__a, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = transform(__a ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = processor(__a, return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__a, __a )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = outputs.logits
print('''Logits:''', logits[0, :3] )
print('''Predicted class:''', model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE_ = timm_model(__a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__a, outputs.logits, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__a ).mkdir(exist_ok=__a )
print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(F'ybelkada/{model_name}' )
processor.push_to_hub(F'ybelkada/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 719 |
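# --- Added inference sketch for an already-converted checkpoint (assumes the
# official `google/bit-50` hub upload; requires `transformers`, `torch`, Pillow
# and network access) ---
import requests
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

name = "google/bit-50"
processor = BitImageProcessor.from_pretrained(name)
model = BitForImageClassification.from_pretrained(name)
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
with torch.no_grad():
    logits = model(**processor(image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])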
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = BigBirdConfig.from_json_file(__a )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE_ = BigBirdForQuestionAnswering(__a )
else:
SCREAMING_SNAKE_CASE_ = BigBirdForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__a, __a, is_trivia_qa=__a )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 628 | 0 |
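# --- Added command-line sketch for the converter above (the script filename and
# all paths are illustrative, not taken from the corpus) ---
# python convert_bigbird_tf_checkpoint.py \
#     --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#     --big_bird_config_file /path/to/big_bird_config.json \
#     --pytorch_dump_path /path/to/pytorch_dump \
#     --is_trivia_qa   # pass only when the checkpoint has a TriviaQA head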
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file: str,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        sep_token: str = "</s>",
        cls_token: str = "<s>",
        unk_token: str = "<unk>",
        pad_token: str = "<pad>",
        mask_token: str = "<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
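# A quick round-trip sketch for the tokenizer defined above. It assumes the
# public "moussaKam/barthez" checkpoint is reachable and sentencepiece is
# installed; the French sample sentence is arbitrary.
tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tokenizer("Bonjour le monde").input_ids  # <s> ... </s> are added automatically
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))  # "Bonjour le monde"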
"""Tokenization classes for RAG."""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)
    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
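# A short sketch of the two-tokenizer workflow above: inputs go through the
# question-encoder tokenizer, targets through the generator tokenizer. It
# assumes the public "facebook/rag-token-base" checkpoint is reachable.
rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
batch = rag_tokenizer.prepare_seq2seq_batch(
    src_texts=["who holds the record in 100m freestyle"],
    tgt_texts=["michael phelps"],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)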
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
)
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
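# The try/except blocks above implement diffusers' soft-dependency pattern:
# probe for an optional package and substitute a dummy object that raises a
# helpful error only when used. A self-contained sketch of the same idea — all
# names below are invented for the illustration, not diffusers API:
import importlib.util


def make_missing_dependency_stub(class_name, pip_name):
    class _Stub:
        def __init__(self, *args, **kwargs):
            raise ImportError(f"{class_name} requires `pip install {pip_name}`")
    _Stub.__name__ = class_name
    return _Stub


if importlib.util.find_spec("k_diffusion") is None:
    # fall back to a stub that fails loudly at construction time
    KDiffusionPipelineStub = make_missing_dependency_stub("KDiffusionPipelineStub", "k-diffusion")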
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)
        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags
    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch
    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)
        return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
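# The LocalMetricTest patcher registry above is just a dict of context
# managers keyed by metric name. A standalone miniature of the same pattern,
# with toy names independent of the real test suite:
from contextlib import contextmanager

TOY_PATCHERS = {}


def register_toy_patcher(name):
    def wrapper(fn):
        TOY_PATCHERS[name] = contextmanager(fn)
        return fn
    return wrapper


@register_toy_patcher("toy_metric")
def patch_toy_metric(module_name):
    print(f"patching heavy calls for {module_name}")
    yield
    print("restored")


with TOY_PATCHERS["toy_metric"]("toy_module"):
    pass  # the expensive model call would be mocked in here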
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
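# A registration sketch for the hook above: it makes a custom extractor
# discoverable through the auto class. MyDemoConfig and MyDemoFeatureExtractor
# are hypothetical names created only for this example; PretrainedConfig and
# FeatureExtractionMixin are already imported at the top of the module.
class MyDemoConfig(PretrainedConfig):
    model_type = "my-demo-model"


class MyDemoFeatureExtractor(FeatureExtractionMixin):
    pass


AutoFeatureExtractor.register(MyDemoConfig, MyDemoFeatureExtractor)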
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
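# Every integration test above repeats the same pmap recipe: replicate the
# weights, shard the batch, and split the PRNG key per device. A toy,
# self-contained version of that recipe — our own example, not diffusers API:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

toy_params = {"w": jnp.ones((4,))}
toy_batch = jnp.ones((jax.device_count() * 2, 4))
replicated_params = replicate(toy_params)  # one copy of the weights per device
sharded_batch = shard(toy_batch)  # leading axis split across devices
rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
out = jax.pmap(lambda p, x: x @ p["w"])(replicated_params, sharded_batch)
print(out.shape)  # (num_devices, 2)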
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
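# `is_rich_available` above is accelerate's soft-dependency probe; the check
# itself is a one-liner over importlib. A self-contained equivalent sketch
# (the real helper lives in accelerate's `.imports` module):
import importlib.util


def is_rich_available_sketch() -> bool:
    # True when `rich` can be imported, without importing it yet
    return importlib.util.find_spec("rich") is not None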
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
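# A minimal sketch of the processor under test, outside the unittest harness.
# The synthetic image and the size/crop values mirror the tester defaults
# above; it assumes transformers, torch and Pillow are installed.
import numpy as np
from transformers import MobileViTImageProcessor

demo_image = np.random.randint(0, 256, (40, 60, 3), dtype=np.uint8)
demo_processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
pixel_values = demo_processor(demo_image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])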
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")
    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
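# The key renaming above is regex-driven: match an original checkpoint key,
# recompute a flattened block index, and substitute the new key. A tiny
# standalone illustration of just the encoder-conv rule:
import re

demo_pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
demo_old_key = "encoders.0.level_blocks.1.model.2.3.weight"
demo_groups = demo_pattern.match(demo_old_key).groups()
demo_block_index = int(demo_groups[2]) * 2 + int(demo_groups[3])  # (model, sub) -> single index
demo_new_key = f"encoders.{demo_groups[0]}.level_blocks.{demo_groups[1]}.downsample_block.{demo_block_index}.{demo_groups[-1]}"
print(demo_old_key, "->", demo_new_key)  # ...downsample_block.7.weight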
from math import factorial
def solution(n: int = 20) -> int:
    """Number of lattice paths through an n x n grid: the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
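# Sanity check: solution(n) is the central binomial coefficient C(2n, n), so
# it must agree with math.comb; Project Euler 15 gives 137846528820 for n = 20.
from math import comb

assert solution(20) == comb(40, 20) == 137846528820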
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def UpperCamelCase ( _a ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ :int = (images / 2 + 0.5).clamp(0 , 1 )
lowercase_ :int = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ :List[Any] = numpy_to_pil(_a )
return images
def numpy_to_pil ( images ) -> List[Any]:
    '''simple docstring'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_5_5).round().astype('''uint8''' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
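# Minimal usage sketch (illustrative; assumes torch is installed and a batch of
# tensors in [-1, 1] with shape (N, C, H, W)):
#   import torch
#   pil_images = UpperCamelCase(torch.rand(1, 3, 64, 64) * 2 - 1)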
| 719 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared ( vector ) -> float:
    '''simple docstring'''
    return np.dot(vector , vector )
class UpperCamelCase :
'''simple docstring'''
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
        # in the future, there could be a default value like in sklearn
        # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
        # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg )
    def __linear ( self , vectora , vectorb ):
        return np.dot(vectora , vectorb )
    def __rbf ( self , vectora , vectorb ):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit ( self , observations , classes ):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.regularization >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n , ) = np.shape(classes )
        def to_minimize(candidate ) -> float:
            s = 0
            (n_candidate , ) = np.shape(candidate )
            for i in range(n_candidate ):
                for j in range(n_candidate ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict ( self , observation ):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
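# Minimal usage sketch (toy, linearly separable data; the points are illustrative):
#   xs = np.array([[0.0, 1.0], [1.0, 1.0], [1.0, 3.0], [3.0, 1.0]])
#   ys = np.array([1, 1, -1, -1])
#   svc = UpperCamelCase(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.array([0.0, 0.0]))  # expected: 1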
if __name__ == "__main__":
import doctest
doctest.testmod()
| 441 | 0 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( PreTrainedModel ):
"""simple docstring"""
a_ = CLIPConfig
a_ = ["CLIPEncoderLayer"]
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )
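    # p_head scores NSFW content and w_head scores watermarks; each emits one
    # value per image that the thresholds below turn into boolean flags.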
@torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        '''simple docstring'''
        image_embeds = self.vision_model(clip_input )[0]
        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected ):
            logger.warning(
                """Potential NSFW content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )
        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected ):
            logger.warning(
                """Potential watermarked content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
| 577 |
import itertools
import math
from collections.abc import Iterator
def is_prime ( number ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ) -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( nth = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
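# Sanity check: solution(6) == 13; with the default nth of 10001 this yields
# 104743, the well-known Project Euler problem 7 answer.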
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

# flags selecting which conversion steps to run (names inferred from their use below)
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    config_parameters_to_change = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
    key_parameters_to_change = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
    subfolder = """""" if has_file(args.repo_path, """config.json""") else """unet"""
    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        text = reader.read()
    config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
    if has_file(args.repo_path, """config.json"""):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNet2DModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config["""down_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        config["""up_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    # remap the leading component of the parameter name
                    new_state_dict[""".""".join([new_key, *param_key.split(""".""")[1:]])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
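    # Example invocation (the script name and paths are illustrative):
    #   python convert_unet_naming.py --repo_path ./old-unet-repo --dump_path ./converted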
| 688 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( BaseTokenizer ):
    def __init__( self , replacement: str = "▁" , add_prefix_space: bool = True , unk_token: Union[str, AddedToken] = "<unk>" , eos_token: Union[str, AddedToken] = "</s>" , pad_token: Union[str, AddedToken] = "<pad>" , ):
        """simple docstring"""
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ) , ' ' ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self , files: Union[str, List[str]] , vocab_size: int = 80_00 , show_progress: bool = True , ):
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self , iterator: Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size: int = 80_00 , show_progress: bool = True , ):
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self ):
        """simple docstring"""
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
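# Minimal usage sketch (the corpus path is illustrative):
#   tokenizer = A__()
#   tokenizer.train("corpus.txt", vocab_size=8_000)
#   tokenizer.save("unigram.json")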
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
def simple_interest( principal: float , daily_interest_rate: float , days_between_payments: int ) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def compound_interest( principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: int , ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest( principal: float , nominal_annual_percentage_rate: float , number_of_years: int , ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 )
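# Worked example: compound_interest(10_000, 0.05, 3) returns the interest only,
# 10_000 * (1.05 ** 3 - 1) = 1_576.25.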
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , transformer: Transformer2DModel , vae: AutoencoderKL , scheduler: KarrasDiffusionSchedulers , id2label: Optional[Dict[int, str]] = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split("," ):
                    self.labels[label.lstrip()] = int(key )
            self.labels = dict(sorted(self.labels.items() ) )
    def get_label_ids( self , label: Union[str, List[str]] ):
        if not isinstance(label , list ):
            label = list(label )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self , class_labels: List[int] , guidance_scale: float = 4.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , num_inference_steps: int = 5_0 , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        batch_size = len(class_labels )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
        class_null = torch.tensor([1_0_0_0] * batch_size , device=self.device )
        class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half] , dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0 )
                noise_pred = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred , latent_channels , dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2 , dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples )
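# Usage sketch (the checkpoint id is illustrative of the DiT release):
#   pipe = lowerCAmelCase__.from_pretrained("facebook/DiT-XL-2-256")
#   ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=ids, num_inference_steps=25).images[0]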
| 575 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
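# The tester below builds a tiny random config and inputs so that every task
# head can be exercised quickly on CPU.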
class NezhaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=128 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''bert.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp , '''bert.pt''' ) , map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_nezha_model( self ):
        '''simple docstring'''
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_nezha_masked_lm( self ):
        '''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21_128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 714 |
'''simple docstring'''
def method_2(boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points(a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
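# With f(x) = x**2 the exact integral over [0, 1] is 1/3; the trapezoidal
# estimate below converges to it as the number of steps grows.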
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 1_0.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary , steps )
    print(F"""y = {y}""" )
if __name__ == "__main__":
main()
| 312 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class __UpperCamelCase ( PretrainedConfig ):
    model_type = "upernet"
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.0_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 259 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
    """simple docstring"""
    result = []
    path = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree(nums: list[int] , max_sum: int , num_index: int , path: list[int] , result: list[list[int]] , remaining_nums_sum: int , ) -> None:
    """simple docstring"""
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
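# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 this prints the two subsets
# [3, 4, 2] and [4, 5].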
| 612 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path , strict , opset):
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , """utils""" , """tf_ops""" , """onnx.json""")) as f:
        onnx_opsets = json.load(f)["""opsets"""]
    for i in range(1 , opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path , """rb""") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(F'Found the following incompatible ops for the opset {opset}:\n' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F'Found the following incompatible ops for the opset {opset}:')
        print(*incompatible_ops , sep="""\n""")
    else:
        print(F'The saved model {saved_model_path} can properly be converted with ONNX.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
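    # Example invocation (the path is illustrative):
    #   python check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12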
| 705 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """simple docstring"""
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""")
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""")
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""")
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""")
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""")
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""")
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""")
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""")
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""")
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""")
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""")
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""")
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection""")
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""")
    return name
def convert_state_dict(orig_state_dict , config):
    """simple docstring"""
    # the destination key names below follow the Hugging Face GroupViT module layout
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f'text_model.encoder.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path , map_location="""cpu""")["""model"""]
    new_state_dict = convert_state_dict(state_dict , config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""")
    image = prepare_img()
    inputs = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=image , padding=True , return_tensors="""pt""")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(F'Model name {model_name} not supported.')
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("""Successfully saved processor and model to""" , pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        processor.push_to_hub(model_name , organization="""nielsr""")
        model.push_to_hub(model_name , organization="""nielsr""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 127 | 0 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float , y: float , z: float , scale: float , distance: float ) -> tuple[float, float]:
    """simple docstring"""
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
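# Perspective projection: coordinates are scaled by distance / (z + distance),
# so points farther along +z shrink toward the origin.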
def rotate(x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
    """simple docstring"""
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    angle = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"""{convert_to_2d(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
    print(f"""{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }""")
| 43 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
        dataset = load_dataset("""ashraq/esc50""" )
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [{"""score""": 0.5_0_1, """label""": """Sound of a dog"""}, {"""score""": 0.4_9_9, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
    def test_small_model_tf( self ):
        """simple docstring"""
        pass
@slow
@require_torch
    def test_large_model_pt( self ):
        """simple docstring"""
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
        # This is an audio of a dog
        dataset = load_dataset("""ashraq/esc50""" )
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
                {"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
                    {"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
                    {"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
                ],
            ]
            * 5 , )
@unittest.skip("""No models are available in TF""" )
    def test_large_model_tf( self ):
        """simple docstring"""
        pass
| 165 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
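# Build the import map first; _LazyModule below defers the heavy backend imports
# (torch/tf/flax) until an attribute is actually accessed.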
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
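# Illustrative sketch of what the lazy structure above buys you (not part of the
# module): importing the package is cheap, and each backend-specific symbol is
# only resolved on first attribute access rather than at import time, e.g.
#
#   import transformers.models.albert as albert  # fast: no torch/tf/flax loaded yet
#   config_cls = albert.AlbertConfig             # triggers configuration_albert only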
| 668 | """simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests( ModelTesterMixin , unittest.TestCase ):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
    @property
    def dummy_input( self ):
        '''simple docstring'''
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input( self, seed=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape( self ):
        '''simple docstring'''
        return (4, 8)
    @property
    def output_shape( self ):
        '''simple docstring'''
        return (4, 8)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model , loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ), 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature( self ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names )
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
        model = model.to(torch_device )
        if hasattr(model, "set_default_attn_processor" ):
            model.set_default_attn_processor()
        seed_input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**seed_input )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests( unittest.TestCase ):
    def get_dummy_seed_input( self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
    def test_kandinsky_prior( self, seed, expected_slice ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
        model.to(torch_device )
        seed_input = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**seed_input )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice, expected_output_slice, atol=1E-3 )
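# The seeded helpers above lean on torch.manual_seed making torch.randn
# deterministic; a minimal self-contained illustration (torch only, kept behind
# a guard so importing this test module stays side-effect free):
if __name__ == "__main__":
    torch.manual_seed(0)
    first = torch.randn(2, 3)
    torch.manual_seed(0)
    second = torch.randn(2, 3)
    assert torch.equal(first, second), "same seed must reproduce the same tensor"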
| 668 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend( line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
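# Example of what the factory above produces (illustrative; the class name is a
# placeholder): for name "UNet2DModel" and backend_name '["torch"]',
# DUMMY_CLASS.format(...) renders roughly:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])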
def create_dummy_files( backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = """[""" + """, """.join(F"""\"{b}\"""" for b in backend.split("""_and_""" ) ) + """]"""
        dummy_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"""torch""": """pt"""}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , """utils""" )
    dummy_file_paths = {
        backend: os.path.join(path , F"""dummy_{short_names.get(backend , backend )}_objects.py""" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = """"""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main """
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    F"""diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` """
                    """to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 156 |
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : int = logging.getLogger()
SCREAMING_SNAKE_CASE : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests( TestCasePlus ):
    def _create_dummy_data(self , data_dir ):
        '''simple docstring'''
        os.makedirs(data_dir , exist_ok=True )
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , """w""" ) as f:
                    f.write(content )
def A__ (self , lowerCamelCase , lowerCamelCase = "pytorch" ):
'''simple docstring'''
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = os.path.join(lowerCamelCase , """output""" )
_lowerCAmelCase = os.path.join(lowerCamelCase , """data""" )
self._create_dummy_data(data_dir=lowerCamelCase )
_lowerCAmelCase = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , """metrics.json""" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
return result
    @require_torch_gpu
    def test_finetune_gpu(self ):
        '''simple docstring'''
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
    @require_torch_multi_gpu
    def test_finetune_multigpu(self ):
        '''simple docstring'''
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self ):
        '''simple docstring'''
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self ):
        '''simple docstring'''
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) | 156 | 1 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location="""cpu""" )
    state_dict = chkpt["""model"""]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v
    config = chkpt["""params"""]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["""dico_word2id"""]
    vocab = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 1_3 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(config , indent=2 ) + """\n""" )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + """\n""" )
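# Typical invocation sketch (the script filename and paths are placeholders,
# not verified against a real checkpoint layout):
#
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path /path/to/xlm/checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output_dir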
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 701 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """timesformer"""
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate | 466 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__A : List[str] = logging.get_logger(__name__)
class DPTFeatureExtractor( DPTImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs ) | 16 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
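# Quick sanity sketch: floats_list((2, 3)) returns a 2x3 nested list of floats
# in [0, scale), so len(floats_list((2, 3))) == 2 and
# len(floats_list((2, 3))[0]) == 3.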
class TvltFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , spectrogram_length=2_0_4_8 , feature_size=1_2_8 , num_audio_channels=1 , hop_length=5_1_2 , chunk_length=3_0 , sampling_rate=4_4_1_0_0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , '''spectrogram_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''feature_size''' ) )
        self.assertTrue(hasattr(feature_extractor , '''num_audio_channels''' ) )
        self.assertTrue(hasattr(feature_extractor , '''hop_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''chunk_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''sampling_rate''' ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('''mel_filters''' )
        mel_second = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('''mel_filters''' )
        mel_second = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        """simple docstring"""
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        """simple docstring"""
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        """simple docstring"""
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='''pt''' ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
        expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4 ) )
| 103 | 0 |
"""simple docstring"""
import random
def rabin_miller( num ) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num( num ) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num )
def generate_large_prime( keysize = 1_024 ) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
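# Usage sketch: generate_large_prime(16) returns a 16-bit probable prime, i.e.
# its bit_length() is exactly 16, because randrange is bounded by
# [2**(keysize - 1), 2**keysize).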
if __name__ == "__main__":
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 538 | """simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 538 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 10_00,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for time_step in [0, 5_00, 9_99]:
            for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_54_96_25 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_99_49_87 ) ) < 1e-5
    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_71_27_90 < 1e-5
        assert scheduler._get_variance(4_87 , predicted_variance=predicted_variance ) - -5.7_99_80_52 < 1e-5
        assert scheduler._get_variance(9_99 , predicted_variance=predicted_variance ) - -0.0_01_00_11 < 1e-5
    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1e-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1e-3
    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1e-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1e-3
    def test_trained_betas( self ):
        pass
    def test_add_noise_device( self ):
        pass
| 510 |
"""simple docstring"""
def bubble_sort( list_data: list , length: int = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
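# Example (doctest-style, illustrative):
#
#   >>> bubble_sort([5, 2, 4, 1])
#   [1, 2, 4, 5]
#   >>> bubble_sort([])
#   []
#
# Note the recursion depth grows with the number of passes, so this variant is
# best suited to short lists.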
if __name__ == "__main__":
import doctest
doctest.testmod()
| 510 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1_0_2_4 ):
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples ,tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang ,return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else: # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
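# Worked example (illustrative, imagining a whitespace tokenizer and
# max_tokens=4): sources ["a b", "c", "d e f"] pack to ["a b c", "d e f"],
# because consecutive examples are merged greedily until adding the next one
# would exceed the token budget for either the source or the target side.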
def pack_data_dir(tok ,data_dir: Path ,max_tokens ,save_path ):
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok ,src_docs ,tgt_docs ,max_tokens )
        print(f'''packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.''' )
        Path(save_path / f'''{split}.source''' ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / f'''{split}.target''' ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        shutil.copyfile(src_path ,save_path / f'''{split}.source''' )
        shutil.copyfile(tgt_path ,save_path / f'''{split}.target''' )
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name" ,type=str ,help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" ,type=int ,default=1_2_8 )
    parser.add_argument("--data_dir" ,type=str )
    parser.add_argument("--save_path" ,type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer ,Path(args.data_dir ) ,args.max_seq_len ,args.save_path )
if __name__ == "__main__":
packer_cli()
| 551 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("""from_to""", """from_ to""")
METRIC_CONVERSION = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 10_00),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def volume_conversion( value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
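# Examples (illustrative, derived from the table above):
#
#   >>> volume_conversion(4, "cubicmeter", "litre")
#   4000.0
#   >>> volume_conversion(1, "litre", "gallon")
#   0.264172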
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches = 10 , n_valid_batches = 2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel( nn.Module ):
    '''simple docstring'''
    def __init__( self ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward( self , x ):
        return x * self.a + self.b
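# DummyModel is just the affine map y = a * x + b with learnable scalars, so a
# quick sanity check (illustrative) is:
#
#   net = DummyModel()
#   x = torch.ones(1)
#   assert torch.allclose(net(x), net.a + net.b)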
class CheckpointTest( unittest.TestCase ):
    '''simple docstring'''
    def test_with_save_limit( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader , valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1 , project_dir=tmpdir , automatic_checkpoint_naming=True )
            # Train baseline
            accelerator = Accelerator(project_config=project_config )
            model , optimizer , train_dataloader , valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def test_can_resume_training_with_folder( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader , valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model , optimizer , train_dataloader , valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            # Save initial
            initial = os.path.join(tmpdir , "initial" )
            accelerator.save_state(initial )
            (a , b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3 , model , train_dataloader , optimizer , accelerator )
            (a1 , b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader , valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model , optimizer , train_dataloader , valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            accelerator.load_state(initial )
            (a2 , b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a , a2 )
            self.assertEqual(b , b2 )
            self.assertEqual(opt_state , opt_state2 )
            test_rands = train(2 , model , train_dataloader , optimizer , accelerator )
            # Save everything
            checkpoint = os.path.join(tmpdir , "checkpoint" )
            accelerator.save_state(checkpoint )
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint )
            test_rands += train(1 , model , train_dataloader , optimizer , accelerator )
            (a3 , b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1 , a3 )
            self.assertEqual(b1 , b3 )
            self.assertEqual(opt_state1 , opt_state3 )
            self.assertEqual(ground_truth_rands , test_rands )
    def test_can_resume_training( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader , valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True )
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir , project_config=project_config )
            model , optimizer , train_dataloader , valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            # Save initial
            accelerator.save_state()
            (a , b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3 , model , train_dataloader , optimizer , accelerator )
            (a1 , b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader , valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=True )
            accelerator = Accelerator(project_dir=tmpdir , project_config=project_config )
            model , optimizer , train_dataloader , valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            accelerator.load_state(os.path.join(tmpdir , "checkpoints" , "checkpoint_0" ) )
            (a2 , b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a , a2 )
            self.assertEqual(b , b2 )
            self.assertEqual(opt_state , opt_state2 )
            test_rands = train(2 , model , train_dataloader , optimizer , accelerator )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir , "checkpoints" , "checkpoint_1" ) )
            test_rands += train(1 , model , train_dataloader , optimizer , accelerator )
            (a3 , b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1 , a3 )
            self.assertEqual(b1 , b3 )
            self.assertEqual(opt_state1 , opt_state3 )
            self.assertEqual(ground_truth_rands , test_rands )
def A__ ( self ):
UpperCAmelCase_ = torch.tensor([1, 2, 3] )
UpperCAmelCase_ = torch.tensor([2, 3, 4] )
UpperCAmelCase_ = DummyModel()
UpperCAmelCase_ = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ = Accelerator()
with self.assertRaises(lowerCAmelCase ) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def A__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ = DummyModel()
UpperCAmelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ = torch.optim.lr_scheduler.StepLR(lowerCAmelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ = dummy_dataloaders()
UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase )
# Train baseline
UpperCAmelCase_ = Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ = scheduler.state_dict()
train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertNotEqual(lowerCAmelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(lowerCAmelCase , scheduler.state_dict() )
def A__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ = DummyModel()
UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ = Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase )
UpperCAmelCase_ = accelerator.prepare(lowerCAmelCase )
# Save 11 states; with total_limit=2 only the last two checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def A__ ( self ):
UpperCAmelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = "/tmp/accelerate/state_checkpointing"
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters(), lr=1e-3)
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE = group["params"][0].device
break
assert param_device.type == accelerator.device.type
SCREAMING_SNAKE_CASE = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
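# A condensed, illustrative sketch of the save/load round trip the tests above
# exercise, assuming `accelerate` and `torch` are installed. The tiny Linear
# model stands in for DummyModel; call the helper with any writable directory.
def _checkpoint_roundtrip_sketch(tmpdir):
    import torch
    from accelerate import Accelerator

    net = torch.nn.Linear(2, 2)
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    accelerator = Accelerator()
    net, opt = accelerator.prepare(net, opt)
    accelerator.save_state(tmpdir)  # writes model/optimizer/RNG states
    before = net.weight.detach().clone()
    with torch.no_grad():
        net.weight.add_(1.0)  # perturb the weights
    accelerator.load_state(tmpdir)  # restores everything that was saved
    assert torch.equal(net.weight.detach(), before)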
| 579 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase = None ):
UpperCAmelCase_ = (
os.path.join(lowerCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCAmelCase_ = Extractor
def A__ ( self , lowerCAmelCase ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
UpperCAmelCase_ = os.path.abspath(lowerCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return force_extract or (
not os.path.isfile(lowerCAmelCase ) and not (os.path.isdir(lowerCAmelCase ) and os.listdir(lowerCAmelCase ))
)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False ):
UpperCAmelCase_ = self.extractor.infer_extractor_format(lowerCAmelCase )
if not extractor_format:
return input_path
UpperCAmelCase_ = self._get_output_path(lowerCAmelCase )
if self._do_extract(lowerCAmelCase , lowerCAmelCase ):
self.extractor.extract(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return output_path
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@classmethod
@abstractmethod
def A__ ( cls , lowerCAmelCase , **lowerCAmelCase ):
...
@staticmethod
@abstractmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
...
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[bytes] = []
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
with open(lowerCAmelCase , "rb" ) as f:
return f.read(lowerCAmelCase )
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase = b"" ):
if not magic_number:
UpperCAmelCase_ = max(len(lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
try:
UpperCAmelCase_ = cls.read_magic_number(lowerCAmelCase , lowerCAmelCase )
except OSError:
return False
return any(magic_number.startswith(lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@classmethod
def A__ ( cls , lowerCAmelCase , **lowerCAmelCase ):
return tarfile.is_tarfile(lowerCAmelCase )
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
def resolved(lowerCAmelCase ) -> str:
return os.path.realpath(os.path.abspath(lowerCAmelCase ) )
def badpath(lowerCAmelCase , lowerCAmelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCAmelCase , lowerCAmelCase ) ).startswith(lowerCAmelCase )
def badlink(lowerCAmelCase , lowerCAmelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCAmelCase_ = resolved(os.path.join(lowerCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCAmelCase )
UpperCAmelCase_ = resolved(lowerCAmelCase )
for finfo in members:
if badpath(finfo.name , lowerCAmelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(lowerCAmelCase , lowerCAmelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(lowerCAmelCase , lowerCAmelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase_ = tarfile.open(lowerCAmelCase )
tar_file.extractall(lowerCAmelCase , members=TarExtractor.safemembers(lowerCAmelCase , lowerCAmelCase ) )
tar_file.close()
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = [b'\x1F\x8B']
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
with gzip.open(lowerCAmelCase , "rb" ) as gzip_file:
with open(lowerCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase = b"" ):
if super().is_extractable(lowerCAmelCase , magic_number=lowerCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCAmelCase , "rb" ) as fp:
UpperCAmelCase_ = _EndRecData(lowerCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCAmelCase_ = fp.read(lowerCAmelCase ) # CD is where we expect it to be
if len(lowerCAmelCase ) == sizeCentralDir:
UpperCAmelCase_ = struct.unpack(lowerCAmelCase , lowerCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with zipfile.ZipFile(lowerCAmelCase , "r" ) as zip_file:
zip_file.extractall(lowerCAmelCase )
zip_file.close()
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
with lzma.open(lowerCAmelCase ) as compressed_file:
with open(lowerCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase_ = rarfile.RarFile(lowerCAmelCase )
rf.extractall(lowerCAmelCase )
rf.close()
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : str = [b'\x28\xb5\x2F\xFD']
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
UpperCAmelCase_ = zstd.ZstdDecompressor()
with open(lowerCAmelCase , "rb" ) as ifh, open(lowerCAmelCase , "wb" ) as ofh:
dctx.copy_stream(lowerCAmelCase , lowerCAmelCase )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = [b'\x42\x5A\x68']
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
with bz2.open(lowerCAmelCase , "rb" ) as compressed_file:
with open(lowerCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : str = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import py7zr
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with py7zr.SevenZipFile(lowerCAmelCase , "r" ) as archive:
archive.extractall(lowerCAmelCase )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = [b'\x04\x22\x4D\x18']
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lz4.frame
with lz4.frame.open(lowerCAmelCase , "rb" ) as compressed_file:
with open(lowerCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A__ ( cls ):
return max(
len(lowerCAmelCase )
for extractor in cls.extractors.values()
if issubclass(lowerCAmelCase , lowerCAmelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase , magic_number_length=lowerCAmelCase )
except OSError:
return b""
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=lowerCAmelCase , )
UpperCAmelCase_ = cls.infer_extractor_format(lowerCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A__ ( cls , lowerCAmelCase ): # <Added version="2.4.0"/>
UpperCAmelCase_ = cls._get_magic_number_max_length()
UpperCAmelCase_ = cls._read_magic_number(lowerCAmelCase , lowerCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCAmelCase , magic_number=lowerCAmelCase ):
return extractor_format
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = "deprecated" , ):
os.makedirs(os.path.dirname(lowerCAmelCase ) , exist_ok=lowerCAmelCase )
# Prevent parallel extractions
UpperCAmelCase_ = str(Path(lowerCAmelCase ).with_suffix(".lock" ) )
with FileLock(lowerCAmelCase ):
shutil.rmtree(lowerCAmelCase , ignore_errors=lowerCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCAmelCase , lowerCAmelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=lowerCAmelCase , )
UpperCAmelCase_ = extractor if extractor != "deprecated" else extractor_format
else:
UpperCAmelCase_ = cls.extractors[extractor_format]
return extractor.extract(lowerCAmelCase , lowerCAmelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=lowerCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCAmelCase ):
return extractor.extract(lowerCAmelCase , lowerCAmelCase )
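# A minimal sketch of the magic-number dispatch implemented above, using only
# the standard library; the table here is illustrative and much smaller than
# the registry of extractors defined in this module.
_DEMO_MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "bz2": [b"BZh"],
    "xz": [b"\xfd7zXZ\x00"],
}


def _infer_format_sketch(path):
    max_len = max(len(m) for magics in _DEMO_MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        head = f.read(max_len)
    for fmt, magics in _DEMO_MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None  # not a recognized archive; extract() returns the path unchanged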
| 579 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class _lowercase ( _A ):
_a : int = 'xlm'
_a : List[Any] = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self , a=3_0_1_4_5 , a=2_0_4_8 , a=1_2 , a=1_6 , a=0.1 , a=0.1 , a=True , a=False , a=False , a=False , a=1 , a=True , a=5_1_2 , a=2_0_4_8**-0.5 , a=1e-12 , a=0.02 , a=0 , a=1 , a=2 , a=3 , a=5 , a=True , a="first" , a=True , a=None , a=True , a=0.1 , a=5 , a=5 , a=0 , a=0 , a=2 , a=0 , **a , ):
snake_case__ : str =vocab_size
snake_case__ : List[Any] =emb_dim
snake_case__ : List[str] =n_layers
snake_case__ : Any =n_heads
snake_case__ : Any =dropout
snake_case__ : Optional[int] =attention_dropout
snake_case__ : Dict =gelu_activation
snake_case__ : Tuple =sinusoidal_embeddings
snake_case__ : Optional[int] =causal
snake_case__ : Optional[int] =asm
snake_case__ : Optional[int] =n_langs
snake_case__ : Union[str, Any] =use_lang_emb
snake_case__ : Optional[Any] =layer_norm_eps
snake_case__ : Union[str, Any] =bos_index
snake_case__ : List[Any] =eos_index
snake_case__ : Optional[Any] =pad_index
snake_case__ : Union[str, Any] =unk_index
snake_case__ : Dict =mask_index
snake_case__ : str =is_encoder
snake_case__ : Optional[Any] =max_position_embeddings
snake_case__ : Any =embed_init_std
snake_case__ : Optional[Any] =init_std
snake_case__ : str =summary_type
snake_case__ : List[Any] =summary_use_proj
snake_case__ : str =summary_activation
snake_case__ : Optional[Any] =summary_proj_to_labels
snake_case__ : Optional[int] =summary_first_dropout
snake_case__ : Optional[Any] =start_n_top
snake_case__ : List[Any] =end_n_top
snake_case__ : Union[str, Any] =mask_token_id
snake_case__ : Any =lang_id
if "n_words" in kwargs:
snake_case__ : int =kwargs["""n_words"""]
super().__init__(pad_token_id=a , bos_token_id=a , **a )
class _lowercase ( _A ):
@property
def lowercase__ ( self ):
if self.task == "multiple-choice":
snake_case__ : Any ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case__ : Union[str, Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
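# The attribute_map above remaps the generic Hugging Face names onto XLM's own
# fields; illustratively, with the real transformers XLMConfig:
#   config = XLMConfig(emb_dim=1024, n_heads=8)
#   config.hidden_size          # -> 1024 (reads emb_dim)
#   config.num_attention_heads  # -> 8 (reads n_heads)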
| 448 |
def heaps(arr: list) -> list:
    """
    Heap's algorithm: return every permutation of ``arr`` as a tuple,
    generating each one from the previous by a single swap.
    """
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
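    # Sanity check (meaningful when the input values are distinct): Heap's
    # algorithm must emit exactly n! permutations with no repeats.
    if len(set(arr)) == len(arr):
        from math import factorial

        res = heaps(arr)
        assert len(res) == factorial(len(arr)) == len(set(res))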
| 448 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = LayoutLMTokenizer
lowercase : List[str] = LayoutLMTokenizerFast
lowercase : Tuple = True
lowercase : Optional[int] = True
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[Any]:
super().setUp()
A : Dict =[
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
A : Union[str, Any] ='UNwant\u00E9d,running'
A : int ='unwanted, running'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Dict =self.tokenizer_class(self.vocab_file )
A : Tuple =tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [7, 4, 5, 10, 8, 9] )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
pass
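# A minimal sketch of the greedy longest-match-first WordPiece lookup that the
# vocab above is built to exercise. (With do_lower_case, BERT-style tokenizers
# also lowercase and strip accents first, so "UNwant\u00E9d" becomes
# "unwanted" before this step.)
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # no prefix of the remainder is in the vocab
        tokens.append(piece)
        start = end
    return tokens


# _wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
# _wordpiece_sketch("running", {"runn", "##ing"}) == ["runn", "##ing"]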
| 305 | import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowercase : str =logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
lowercase : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowercase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
A : Tuple =self.task_name.lower()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = "train"
lowercase : int = "dev"
lowercase : Union[str, Any] = "test"
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : GlueDataTrainingArguments
lowercase : str
lowercase : List[InputFeatures]
def __init__( self : str , SCREAMING_SNAKE_CASE__ : GlueDataTrainingArguments , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizerBase , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Split] = Split.train , SCREAMING_SNAKE_CASE__ : Optional[str] = None , ) -> List[Any]:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , SCREAMING_SNAKE_CASE__ , )
A : Any =args
A : Union[str, Any] =glue_processors[args.task_name]()
A : Union[str, Any] =glue_output_modes[args.task_name]
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
A : Any =Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
A : Tuple =os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
A : Optional[Any] =self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A , A : str =label_list[2], label_list[1]
A : Tuple =label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A : int =cached_features_file + '.lock'
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not args.overwrite_cache:
A : Optional[Any] =time.time()
A : str =torch.load(SCREAMING_SNAKE_CASE__ )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
A : int =self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A : Dict =self.processor.get_test_examples(args.data_dir )
else:
A : Optional[Any] =self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A : Optional[int] =examples[:limit_length]
A : int =glue_convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_length=args.max_seq_length , label_list=SCREAMING_SNAKE_CASE__ , output_mode=self.output_mode , )
A : List[Any] =time.time()
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ) -> Union[str, Any]:
return len(self.features )
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
return self.label_list
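# A minimal sketch of the lock-then-cache pattern used in __init__ above,
# reusing the `os`, `torch`, and `FileLock` imports from this module;
# `build_features` is a hypothetical callable doing the expensive work.
def _load_or_build_sketch(cache_path, build_features):
    with FileLock(cache_path + ".lock"):  # first process builds, the rest wait
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build_features()
        torch.save(features, cache_path)
        return features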
| 305 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
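# A rough, illustrative reimplementation of what _LazyModule provides (not the
# actual transformers helper): attributes resolve their submodule on first
# access instead of paying every import cost up front.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)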
| 704 | import inspect
import unittest
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _UpperCAmelCase ( self: Dict ) -> Dict:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
__UpperCAmelCase = inspect.getmembers(__lowerCAmelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__UpperCAmelCase = "k-diffusion"
elif backend == "invisible_watermark":
__UpperCAmelCase = "invisible-watermark"
assert backend in deps, F'''{backend} is not in the deps table!'''
| 286 | 0 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def _a(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Solve Coulomb's law, F = k * q1 * q2 / d**2, for whichever quantity is
    passed as 0; exactly one of the four arguments must be 0.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
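    # Worked example: two 1 C charges 1 m apart feel |F| = k * 1 * 1 / 1**2,
    # i.e. exactly COULOMBS_CONSTANT newtons.
    assert _a(force=0, charge1=1, charge2=1, distance=1) == {"force": COULOMBS_CONSTANT}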
| 383 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ) -> Any:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __lowercase( self ) -> str:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__UpperCamelCase = TFViTModel(config=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
__UpperCamelCase = self.image_size // 2
__UpperCamelCase = pixel_values[:, :, :image_size, :image_size]
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = TFViTForImageClassification(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
__UpperCamelCase = self.image_size // 2
__UpperCamelCase = pixel_values[:, :, :image_size, :image_size]
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = TFViTForImageClassification(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self ) -> Dict:
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def __lowercase( self ) -> List[str]:
__UpperCamelCase = TFViTModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowercase( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowercase( self ) -> str:
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowercase( self ) -> Tuple:
pass
def __lowercase( self ) -> int:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def __lowercase( self ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> str:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> str:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowercase( self ) -> List[Any]:
__UpperCamelCase = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase( self ) -> List[Any]:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowercase( self ) -> Dict:
__UpperCamelCase = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='tf' )
# forward pass
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCamelCase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
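# The sequence-length bookkeeping above follows from ViT's patch embedding:
# an S x S image cut into P x P patches yields (S // P) ** 2 tokens plus one
# [CLS] token. With the tester defaults (image_size=30, patch_size=2):
assert (30 // 2) ** 2 + 1 == 226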
| 383 | 1 |
def SCREAMING_SNAKE_CASE_(number: int) -> str:
    """Return the two's-complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
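    # Worked example: for -5, bin(-5)[3:] == "101" (3 bits) and
    # bin(5 - (1 << 3))[3:] == "11", padded to "011" behind the sign bit "1",
    # giving "0b1011" -- the 4-bit two's-complement encoding of -5.
    assert SCREAMING_SNAKE_CASE_(-5) == "0b1011"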
| 443 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character code i with a fresh random key k as c = (i + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover each character as i = (c - k**2) // k."""
        plain = []
        for i in range(len(key)):
            p = (cipher[i] - key[i] ** 2) // key[i]
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad.encrypt("Hello")
    print(c, k)
    print(Onepad.decrypt(c, k))
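    # The round trip is exact: c = (i + k) * k implies (c - k**2) // k == i
    # for every key k >= 1 drawn above.
    assert Onepad.decrypt(c, k) == "Hello"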
| 443 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :Optional[int] = None
_UpperCAmelCase :Optional[jnp.ndarray] = None
_UpperCAmelCase :Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _snake_case ( cls ):
return cls()
@dataclass
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :jnp.ndarray
_UpperCAmelCase :jnp.ndarray
_UpperCAmelCase :KarrasVeSchedulerState
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ):
"""simple docstring"""
@property
def _snake_case ( self ):
return True
@register_to_config
def __init__( self , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 100 , _UpperCAmelCase = 1.007 , _UpperCAmelCase = 80 , _UpperCAmelCase = 0.05 , _UpperCAmelCase = 50 , ):
pass
def _snake_case ( self ):
return KarrasVeSchedulerState.create()
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = () ):
lowercase__: List[Any] = jnp.arange(0 , _UpperCAmelCase )[::-1].copy()
lowercase__: Optional[int] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=_UpperCAmelCase , schedule=jnp.array(_UpperCAmelCase , dtype=jnp.floataa ) , timesteps=_UpperCAmelCase , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__: Tuple = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__: Optional[int] = random.split(_UpperCAmelCase , num=1 )
lowercase__: Optional[int] = self.config.s_noise * random.normal(key=_UpperCAmelCase , shape=sample.shape )
lowercase__: Any = sigma + gamma * sigma
lowercase__: List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
lowercase__: int = sample_hat + sigma_hat * model_output
lowercase__: Tuple = (sample_hat - pred_original_sample) / sigma_hat
lowercase__: str = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_UpperCAmelCase , derivative=_UpperCAmelCase , state=_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
lowercase__: Optional[int] = sample_prev + sigma_prev * model_output
lowercase__: Any = (sample_prev - pred_original_sample) / sigma_prev
lowercase__: Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_UpperCAmelCase , derivative=_UpperCAmelCase , state=_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
raise NotImplementedError()
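# Note on the stochastic churn implemented above: sigma_hat = sigma * (1 + gamma),
# and the injected noise (sigma_hat**2 - sigma**2) ** 0.5 * eps is sized so that
# (for s_noise close to 1) the perturbed sample sits at noise level sigma_hat,
# matching the stochastic sampler of Karras et al. (2022).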
| 586 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "canine"
def __init__( self , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=16384 , _UpperCAmelCase=16 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=0XE000 , _UpperCAmelCase=0XE001 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=8 , _UpperCAmelCase=16384 , _UpperCAmelCase=128 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Union[str, Any] = max_position_embeddings
lowercase__: Tuple = hidden_size
lowercase__: Optional[Any] = num_hidden_layers
lowercase__: Union[str, Any] = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: str = hidden_dropout_prob
lowercase__: Tuple = attention_probs_dropout_prob
lowercase__: Union[str, Any] = initializer_range
lowercase__: Tuple = type_vocab_size
lowercase__: Any = layer_norm_eps
# Character config:
lowercase__: List[str] = downsampling_rate
lowercase__: Union[str, Any] = upsampling_kernel_size
lowercase__: Union[str, Any] = num_hash_functions
lowercase__: Optional[Any] = num_hash_buckets
lowercase__: Tuple = local_transformer_stride
| 586 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class a_ ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , **UpperCAmelCase , ):
super().__init__(features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , **UpperCAmelCase )
a_ = Sql(
cache_dir=UpperCAmelCase , features=UpperCAmelCase , sql=UpperCAmelCase , con=UpperCAmelCase , **UpperCAmelCase , )
def lowerCAmelCase__ ( self ):
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , )
# Build dataset for splits
a_ = self.builder.as_dataset(
split="""train""" , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class a_ :
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
a_ = dataset
a_ = name
a_ = con
a_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a_ = num_proc
a_ = to_sql_kwargs
def lowerCAmelCase__ ( self ):
a_ = self.to_sql_kwargs.pop("""sql""" , UpperCAmelCase )
a_ = self.to_sql_kwargs.pop("""con""" , UpperCAmelCase )
a_ = self.to_sql_kwargs.pop("""index""" , UpperCAmelCase )
a_ = self._write(index=UpperCAmelCase , **self.to_sql_kwargs )
return written
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ , a_ , a_ = args
a_ = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
a_ = query_table(
table=self.dataset.data , key=slice(UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
a_ = batch.to_pandas()
a_ = df.to_sql(self.name , self.con , index=UpperCAmelCase , **UpperCAmelCase )
return num_rows or len(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , **UpperCAmelCase ):
a_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
a_ , a_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCAmelCase , UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
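# An illustrative round trip through these wrappers; the public entry points
# are Dataset.to_sql / Dataset.from_sql, and the sqlite URI below is only an
# example:
#
#     ds = Dataset.from_dict({"x": [1, 2, 3]})
#     ds.to_sql("t", "sqlite:///demo.db")                    # SqlDatasetWriter
#     back = Dataset.from_sql("SELECT x FROM t", "sqlite:///demo.db")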
| 511 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowercase__ ={
'allenai/led-base-16384': 1_63_84,
}
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple = LEDTokenizer
lowerCamelCase__ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase ) != add_prefix_space:
a_ = getattr(UpperCAmelCase , pre_tok_state.pop("""type""" ) )
a_ = add_prefix_space
a_ = pre_tok_class(**UpperCAmelCase )
a_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a_ = """post_processor"""
a_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
a_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a_ = tuple(state["""sep"""] )
if "cls" in state:
a_ = tuple(state["""cls"""] )
a_ = False
if state.get("""add_prefix_space""" , UpperCAmelCase ) != add_prefix_space:
a_ = add_prefix_space
a_ = True
if state.get("""trim_offsets""" , UpperCAmelCase ) != trim_offsets:
a_ = trim_offsets
a_ = True
if changes_to_apply:
a_ = getattr(UpperCAmelCase , state.pop("""type""" ) )
a_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
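
# --- Added illustration (not part of the tokenizer) ---------------------------
# A framework-free sketch of the global-attention-mask padding rule in `_pad`
# above: padding positions are filled with -1, because 0 already means "local
# attention" rather than "ignore". `pad_global_attention_mask` is a made-up
# helper name used only for this demonstration.
if __name__ == "__main__":
    def pad_global_attention_mask(mask, target_length, padding_side="right"):
        difference = target_length - len(mask)
        if padding_side == "right":
            return mask + [-1] * difference
        return [-1] * difference + mask

    assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
    assert pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]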
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )
    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
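
# --- Added illustration (not the accelerate implementation) -------------------
# A rough sketch of how the gcloud command strings asserted above could be
# composed from the CLI arguments. The function name and structure are
# illustrative assumptions, not accelerate's actual internals; the real debug
# output is also prefixed with "Running ".
if __name__ == "__main__":
    def build_tpu_command(tpu_name, tpu_zone, commands, install_accelerate=False):
        payload = "cd /usr/share"
        if install_accelerate:
            payload += "; pip install accelerate -U"
        for command in commands:
            payload += f"; {command}"
        return (
            f"gcloud compute tpus tpu-vm ssh {tpu_name} --zone {tpu_zone} "
            f"--command {payload} --worker all"
        )

    print(build_tpu_command("test-tpu", "us-central1-a", ["ls"]))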
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume `image` already carries one or more {"image": ..., "question": ...} payloads
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
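
# --- Added usage sketch (comments only, not executed by this module) ----------
# The class above backs the "visual-question-answering" pipeline tag:
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="path/to/image.png", question="What is in the image?", top_k=2)
#   # -> [{"score": ..., "answer": ...}, {"score": ..., "answer": ...}]
# ("path/to/image.png" is a placeholder; any local path or URL should work.)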
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
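
# Worked example of a single gradient step (illustrative comment, not executed):
# with x = [[1.0, 2.0]], y = [1], theta = [0.0, 0.0] and alpha = 0.1:
#   z = x @ theta = [0.0]           ->  h = sigmoid(z) = [0.5]
#   gradient = x.T @ (h - y) / 1    =   [-0.5, -1.0]
#   theta   <- theta - 0.1 * gradient = [0.05, 0.1]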
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
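
    # --- Added illustrative check (not in the original script) ---------------
    # A minimal cross-check of the hand-rolled fit against scikit-learn.
    # sklearn regularizes by default, so set a large C to approximate the
    # unregularized fit above; numbers should be in the same ballpark only.
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression(C=1e6, fit_intercept=False, max_iter=10_000).fit(x, y)
    print("sklearn theta:", clf.coef_.ravel())  # compare with theta above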
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262]
        )
        expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward pass with a plain prompt
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward pass with precomputed prompt embeddings
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217]
        )
        expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135])

        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706]
        )
        expected_slice_depth = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495_586
        expected_rgb_std = 0.33_795_515
        expected_depth_mean = 112.48_518
        expected_depth_std = 98.489_746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4_194_127
        expected_rgb_std = 0.35_375_586
        expected_depth_mean = 0.5_638_502
        expected_depth_std = 0.34_686_103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
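
# --- Added usage sketch (not part of the test suite) --------------------------
# Mirrors what the slow tests above exercise: the LDM3D pipeline returns both
# an RGB image and a depth map in one call. Guarded so nothing runs on import
# or without a GPU; the "Intel/ldm3d" checkpoint name is the one the tests use.
if __name__ == "__main__" and torch.cuda.is_available():
    pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
    out = pipe("a photograph of an astronaut riding a horse", output_type="numpy")
    rgb, depth = out.rgb, out.depth  # shapes (1, 512, 512, 3) and (1, 512, 512)
    print(rgb.shape, depth.shape)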
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass
    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
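
# --- Added illustration (not part of the scheduler) ---------------------------
# One stochastic Karras-VE update in plain numpy, mirroring the math of
# add_noise_to_input / step / step_correct above. The "denoiser" is a stand-in
# that just predicts zeros and the values (sigma=10, 50 steps) are arbitrary.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    sample, sigma, sigma_prev = rng.normal(size=4), 10.0, 8.0
    gamma = min(80 / 50, 2**0.5 - 1)  # min(s_churn / num_steps, sqrt(2) - 1)
    sigma_hat = sigma + gamma * sigma  # temporarily raised noise level
    sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * 1.007 * rng.normal(size=4)

    model_output = np.zeros(4)  # stand-in denoiser output at (sample_hat, sigma_hat)
    pred_original = sample_hat + sigma_hat * model_output
    derivative = (sample_hat - pred_original) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative  # Euler step

    model_output = np.zeros(4)  # second denoiser call at (sample_prev, sigma_prev)
    pred_original = sample_prev + sigma_prev * model_output
    derivative_corr = (sample_prev - pred_original) / sigma_prev
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_corr)  # Heun correction
    print("corrected sample:", sample_prev)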
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[4_81, 47_35, 5_44]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
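
# --- Added usage sketch (not part of the test suite) --------------------------
# Greedy generation with openai-gpt, mirroring the integration test above.
# OpenAIGPTTokenizer is not imported in this file but does exist in
# transformers; running this requires the checkpoint to be downloadable.
if __name__ == "__main__" and is_torch_available():
    from transformers import OpenAIGPTTokenizer

    tok = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    lm = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    ids = tok("the president is", return_tensors="pt").input_ids
    print(tok.decode(lm.generate(ids, do_sample=False)[0]))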
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_hub, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0

    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
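
# --- Added illustration (comments only, not executed) -------------------------
# What the checkpoint-key fixup in _load_model does to a torch.compile'd
# GPT-style key, traced step by step through new_layer_name_dict:
#   "_orig_mod.transformer.h.0.attn.c_attn.weight"
#   -> strip "_orig_mod."         : "transformer.h.0.attn.c_attn.weight"
#   -> "c_attn" -> "att_proj"     : "transformer.h.0.attn.att_proj.weight"
#   -> "transformer." -> ""       : "h.0.attn.att_proj.weight"
#   -> "h." -> "layers."          : "layers.0.attn.att_proj.weight"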
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_SCREAMING_SNAKE_CASE : int = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
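
# --- Added illustration (not part of the class) -------------------------------
# The key transformation used by the create_dummy_data_* helpers above: take
# the final path component of a URL and percent-encode it with quote_plus so
# query arguments survive as a file name. The URL below is hypothetical.
if __name__ == "__main__":
    url = "https://example.com/data/train.json?version=2"
    print(urllib.parse.quote_plus(Path(url).name))  # -> train.json%3Fversion%3D2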
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def A ( lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
if issubclass(lowercase__ , lowercase__ ):
UpperCamelCase__ :Any = jsonl_path
elif issubclass(lowercase__ , lowercase__ ):
UpperCamelCase__ :List[str] = [jsonl_path]
UpperCamelCase__ :Optional[int] = tmp_path / """cache"""
UpperCamelCase__ :str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :List[Any] = JsonDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
_check_json_dataset(lowercase__ , lowercase__ )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
UpperCamelCase__ :Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def A ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> Optional[Any]:
UpperCamelCase__ :int = tmp_path / """cache"""
UpperCamelCase__ :List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :List[Any] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
_check_json_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def A ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : List[Any] ) -> List[Any]:
UpperCamelCase__ :int = tmp_path / """cache"""
UpperCamelCase__ :Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :str = features.copy() if features else default_expected_features
UpperCamelCase__ :Optional[int] = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :List[Any] = JsonDatasetReader({"""train""": jsonl_path} , features=lowercase__ , cache_dir=lowercase__ ).read()
_check_json_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def A ( lowercase__ : Tuple , lowercase__ : str , lowercase__ : int ) -> List[Any]:
if split:
UpperCamelCase__ :Dict = {split: jsonl_path}
else:
UpperCamelCase__ :Tuple = """train"""
UpperCamelCase__ :Any = {"""train""": jsonl_path, """test""": jsonl_path}
UpperCamelCase__ :Union[str, Any] = tmp_path / """cache"""
UpperCamelCase__ :Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :int = JsonDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
_check_json_datasetdict(lowercase__ , lowercase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
class lowerCAmelCase_ :
"""simple docstring"""
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __a ( self :str , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ ).write()
buffer.seek(0 )
UpperCamelCase__ :str = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __a ( self :Any , lowerCamelCase__ :Any , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __a ( self :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __a ( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :str , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[Any] = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase__ ) == 10
def __a ( self :Tuple , lowerCamelCase__ :List[Any] ):
with pytest.raises(lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
UpperCamelCase__ :Optional[int] = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , compression=lowerCamelCase__ ).write()
with fsspec.open(lowerCamelCase__ , """rb""" , compression="""infer""" ) as f:
UpperCamelCase__ :Union[str, Any] = f.read()
with fsspec.open(lowerCamelCase__ , """rb""" , compression="""infer""" ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 45 |
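# A minimal round-trip sketch of the reader/writer pair exercised by the tests
# above (added for illustration; the file name and column values are not from
# the original test suite).
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter


def _json_roundtrip_example(path: str = "data.jsonl") -> Dataset:
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    JsonDatasetWriter(ds, path, lines=True).write()
    return JsonDatasetReader(path).read()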
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs ):
    # Elementwise logistic sigmoid; the name matches the call sites in postprocess.
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs ):
    # Numerically stable softmax: subtract the rowwise max before exponentiating.
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class ClassificationFunction ( ExplicitEnum ):
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline ( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self , **_A ) -> Tuple:
'''simple docstring'''
super().__init__(**_A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
        '''simple docstring'''
        # "" is the top_k default so that an explicit `top_k=None` from user
        # code can be told apart from "argument not given".
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k , int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
                """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , UserWarning , )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = """top_k""" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs ) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
                """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ) -> Dict:
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["""logits"""][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"""label""": self.model.config.id2label[i], """score""": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
return dict_scores
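

if __name__ == "__main__":
    # Hedged usage sketch (added; the checkpoint name is a common public
    # sentiment model, not something taken from the original file):
    from transformers import pipeline

    clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(clf("I love this!", top_k=None))  # scores for every label, as enabled above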
| 238 | 0 |
import numpy as np
def _a ( vector: np.ndarray ) -> np.ndarray:
    """
    Tangent hyperbolic via its rational form: tanh(x) = 2 / (1 + e^(-2x)) - 1.

    >>> float(_a(np.array([0.0]))[0])
    0.0
    >>> bool(np.isclose(_a(np.array([1.0]))[0], np.tanh(1.0)))
    True
    """
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
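
    # Quick numerical check (added; not in the original file): the rational
    # form above is algebraically identical to tanh.
    _xs = np.linspace(-3.0, 3.0, 7)
    assert np.allclose(_a(_xs), np.tanh(_xs))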
| 478 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class SummarizationModule ( BaseTransformer ):
    mode = """summarization"""
    loss_names = ["""loss"""]
    metric_names = ROUGE_KEYS
    default_val_metric = """rouge2"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ):
if hparams.sortish_sampler and hparams.gpus > 1:
a_ : Any = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCamelCase_ , num_labels=lowerCamelCase_ , mode=self.mode , **lowerCamelCase_ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
a_ : Any = Path(self.output_dir ) / """metrics.json"""
a_ : Tuple = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
a_ : Dict = 0
a_ : Optional[Any] = defaultdict(lowerCamelCase_ )
a_ : str = self.config.model_type
a_ : Union[str, Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
a_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
a_ : int = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
a_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
a_ : Optional[Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
a_ : int = get_git_info()["""repo_sha"""]
a_ : Union[str, Any] = hparams.num_workers
a_ : Tuple = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase_ ):
a_ : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
a_ : List[str] = self.decoder_start_token_id
a_ : Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
a_ : Union[str, Any] = False
a_ : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
a_ : Union[str, Any] = self.hparams.eval_max_gen_length
else:
a_ : Optional[Any] = self.model.config.max_length
a_ : Optional[int] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase( self : List[Any] , lowerCamelCase_ : Dict[str, torch.Tensor] ):
a_ : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase_ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
a_ : List[Any] = True
return readable_batch
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
return self.model(lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase( self : Tuple , lowerCamelCase_ : List[int] ):
a_ : str = self.tokenizer.batch_decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return lmap(str.strip , lowerCamelCase_ )
def UpperCAmelCase( self : Optional[int] , lowerCamelCase_ : dict ):
a_ : int = self.tokenizer.pad_token_id
a_ , a_ : Optional[int] = batch["""input_ids"""], batch["""attention_mask"""]
a_ : Optional[Any] = batch["""labels"""]
if isinstance(self.model , lowerCamelCase_ ):
a_ : List[Any] = self.model._shift_right(lowerCamelCase_ )
else:
a_ : Dict = shift_tokens_right(lowerCamelCase_ , lowerCamelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
a_ : int = decoder_input_ids
self.save_readable_batch(lowerCamelCase_ )
a_ : str = self(lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , use_cache=lowerCamelCase_ )
a_ : int = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
a_ : Any = nn.CrossEntropyLoss(ignore_index=lowerCamelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
a_ : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
a_ : int = nn.functional.log_softmax(lowerCamelCase_ , dim=-1 )
a_ , a_ : str = label_smoothed_nll_loss(
lowerCamelCase_ , lowerCamelCase_ , self.hparams.label_smoothing , ignore_index=lowerCamelCase_ )
return (loss,)
@property
def UpperCAmelCase( self : Union[str, Any] ):
return self.tokenizer.pad_token_id
def UpperCAmelCase( self : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] ):
a_ : Dict = self._step(lowerCamelCase_ )
a_ : Optional[int] = dict(zip(self.loss_names , lowerCamelCase_ ) )
# tokens per batch
a_ : str = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
a_ : Optional[int] = batch["""input_ids"""].shape[0]
a_ : int = batch["""input_ids"""].eq(self.pad ).sum()
a_ : str = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCAmelCase( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
return self._generative_step(lowerCamelCase_ )
def UpperCAmelCase( self : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int]="val" ):
self.step_count += 1
a_ : Optional[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
a_ : Tuple = losses["""loss"""]
a_ : Optional[int] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
a_ : List[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
a_ : torch.FloatTensor = torch.tensor(lowerCamelCase_ ).type_as(lowerCamelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase_ )
a_ : str = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
a_ : Union[str, Any] = self.step_count
self.metrics[prefix].append(lowerCamelCase_ ) # callback writes this to self.metrics_save_path
a_ : List[str] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCAmelCase( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
return calculate_rouge(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int , lowerCamelCase_ : dict ):
a_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
a_ : Any = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCamelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
a_ : Optional[int] = (time.time() - ta) / batch["""input_ids"""].shape[0]
a_ : List[str] = self.ids_to_clean_text(lowerCamelCase_ )
a_ : List[str] = self.ids_to_clean_text(batch["""labels"""] )
a_ : Tuple = self._step(lowerCamelCase_ )
a_ : List[str] = dict(zip(self.loss_names , lowerCamelCase_ ) )
a_ : Dict = self.calc_generative_metrics(lowerCamelCase_ , lowerCamelCase_ )
a_ : Optional[int] = np.mean(lmap(lowerCamelCase_ , lowerCamelCase_ ) )
base_metrics.update(gen_time=lowerCamelCase_ , gen_len=lowerCamelCase_ , preds=lowerCamelCase_ , target=lowerCamelCase_ , **lowerCamelCase_ )
return base_metrics
def UpperCAmelCase( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
return self._generative_step(lowerCamelCase_ )
def UpperCAmelCase( self : str , lowerCamelCase_ : Any ):
return self.validation_epoch_end(lowerCamelCase_ , prefix="""test""" )
def UpperCAmelCase( self : Any , lowerCamelCase_ : Any ):
a_ : List[str] = self.n_obs[type_path]
a_ : Dict = self.target_lens[type_path]
a_ : Optional[Any] = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase_ , n_obs=lowerCamelCase_ , max_target_length=lowerCamelCase_ , **self.dataset_kwargs , )
return dataset
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : bool = False ):
a_ : List[str] = self.get_dataset(lowerCamelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
a_ : List[str] = dataset.make_sortish_sampler(lowerCamelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
a_ : int = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
def UpperCAmelCase( self : Any ):
a_ : int = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase_ )
return dataloader
def UpperCAmelCase( self : Dict ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def UpperCAmelCase( self : List[Any] ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCamelCase_ , default=lowerCamelCase_ )
parser.add_argument("""--logger_name""" , type=lowerCamelCase_ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCamelCase_ , default=5_0_0 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCamelCase_ , default="""summarization""" , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCamelCase_ , default=0.0 , required=lowerCamelCase_ )
parser.add_argument("""--src_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--tgt_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--eval_beams""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ )
parser.add_argument(
"""--val_metric""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCamelCase_ , default=1 , required=lowerCamelCase_ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class TranslationModule ( SummarizationModule ):
    mode = """translation"""
    loss_names = ["""loss"""]
    metric_names = ["""bleu"""]
    default_val_metric = """bleu"""
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Tuple ):
super().__init__(lowerCamelCase_ , **lowerCamelCase_ )
a_ : Union[str, Any] = hparams.src_lang
a_ : Optional[int] = hparams.tgt_lang
def UpperCAmelCase( self : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ):
return calculate_bleu(lowerCamelCase_ , lowerCamelCase_ )
def main( args , model=None ) -> SummarizationModule:
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("""/tmp""" )
        or str(args.output_dir ).startswith("""/var""" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("""WANDB_PROJECT""" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=f'''hf_{dataset}''' )

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False

    lower_is_better = args.val_metric == """loss"""
    trainer: pl.Trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = """"""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]

    trainer.logger.log_hyperparams(model.hparams )

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
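
    # Hedged example invocation (added; paths and hyper-parameters are
    # illustrative, not taken from the original script):
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
    #       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
    #       --do_train --gpus 1 --task summarization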
| 478 | 1 |
"""simple docstring"""
class EditDistance:
    """
    Edit (Levenshtein) distance between two strings, solved both top-down
    (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self) -> None:
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()

    S1 = input("""Enter the first string: """).strip()
    S2 = input("""Enter the second string: """).strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 82 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class _SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase" , do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_b=None) ->List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None) ->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
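

if __name__ == "__main__":
    # Hedged usage sketch (added; not in the original file): loading the fast
    # tokenizer defined above from the standard uncased checkpoint.
    tok = _SCREAMING_SNAKE_CASE.from_pretrained("distilbert-base-uncased")
    print(tok("Hello world")["input_ids"])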
| 59 | 0 |
"""simple docstring"""
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
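

def _solution_mod() -> str:
    # A modular-arithmetic variant (added; not in the original file): it is
    # equivalent for the last ten digits but never builds the ~3000-digit sum.
    return str(sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10).zfill(10)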
if __name__ == "__main__":
print(solution()) | 128 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 128 | 1 |
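

# Hedged demo (added; not in the original file): with a lazy import structure
# like the one above, the heavy submodule import happens on first attribute
# access rather than at package import time.
if __name__ == "__main__":
    from transformers import GroupViTConfig  # resolved lazily on access

    print(GroupViTConfig().model_type)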
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class lowercase_ (PretrainedConfig ):
    """simple docstring"""

    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )

        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                """The config can simply be saved and uploaded again to be fixed.""" )
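

if __name__ == "__main__":
    # Hedged usage sketch (added; not in the original file): default
    # construction of the config above (upstream name: MvpConfig) with the
    # prompt fields overridden.
    cfg = lowercase_(use_prompt=True, prompt_length=128)
    print(cfg.d_model, cfg.use_prompt, cfg.prompt_length)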
| 41 | """simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args ) -> "ConvertCommand":
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE ='\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )

        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCamelCase : Tuple = self._tf_checkpoint
_lowerCamelCase : int = ''''''
else:
_lowerCamelCase : List[str] = self._tf_checkpoint
_lowerCamelCase : str = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''' )
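
# Hedged CLI example for the command implemented above (added; paths are
# illustrative, not from the original file):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin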
| 434 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Any = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = BlipImageProcessor()
lowerCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCAmelCase_ : List[Any] = BlipProcessor(snake_case_ , snake_case_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self , **_lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer
def UpperCAmelCase__ ( self , **_lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase_ : List[str] = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Any = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ : Optional[Any] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
lowerCAmelCase_ : Optional[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = self.get_image_processor()
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase_ : Dict = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
lowerCAmelCase_ : List[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : str = image_processor(snake_case_ , return_tensors="""np""" )
lowerCAmelCase_ : Optional[int] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = self.get_image_processor()
lowerCAmelCase_ : Dict = self.get_tokenizer()
lowerCAmelCase_ : str = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
lowerCAmelCase_ : Union[str, Any] = """lower newer"""
lowerCAmelCase_ : Tuple = processor(text=snake_case_ )
lowerCAmelCase_ : Optional[Any] = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase_ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ : List[Any] = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
lowerCAmelCase_ : Optional[int] = """lower newer"""
lowerCAmelCase_ : List[str] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[int] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = self.get_image_processor()
lowerCAmelCase_ : List[Any] = self.get_tokenizer()
lowerCAmelCase_ : str = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
lowerCAmelCase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : Dict = processor.batch_decode(snake_case_ )
lowerCAmelCase_ : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase_ : Optional[int] = self.get_tokenizer()
lowerCAmelCase_ : Tuple = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
lowerCAmelCase_ : int = """lower newer"""
lowerCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Any = processor(text=snake_case_ , images=snake_case_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
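
# Hedged note (added; not in the original tests): outside the test suite the
# same processor ships with public checkpoints and is called the same way, e.g.
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")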
| 720 |
from collections.abc import Generator
from math import sin
def to_little_endian ( string_aa : bytes ) -> bytes:
    """Reorder a 32-char bit string from big-endian to little-endian word order."""
    if len(string_aa ) != 32:
        raise ValueError("""Input must be of length 32""" )

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def _lowerCAmelCase ( _a : int ) -> bytes:
if i < 0:
raise ValueError("""Input must be non-negative""" )
lowerCAmelCase_ : Tuple = format(_a , """08x""" )[-8:]
lowerCAmelCase_ : Any = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def _lowerCAmelCase ( _a : bytes ) -> bytes:
lowerCAmelCase_ : Tuple = B""""""
for char in message:
bit_string += format(_a , """08b""" ).encode("""utf-8""" )
lowerCAmelCase_ : Dict = format(len(_a ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_a ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotate a 32-bit unsigned integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 lowercase hex characters (bytes)."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    # Per-round left-rotation amounts (four rounds of 16 steps each)
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
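    # A quick self-check sketch: with the names reconstructed above, the digest
    # should agree with the standard library's MD5 on a well-known test vector.
    import hashlib

    sample = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")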
| 440 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface to the files of a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 197 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def rename_key(name):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
def convert_state_dict(orig_state_dict, config):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
def prepare_video(num_frames):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
    missing_keys , unexpected_keys = model.load_state_dict(snake_case , strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
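# Example invocation sketch (script filename and output path are illustrative):
#   python convert_xclip_checkpoint.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub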
| 0 | 0 |
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder: classical bit 0 is the sum (XOR), bit 1 the carry (AND)."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
# Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 710 |
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next (n + 1)**3 - n**3
    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 507 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    """Parse the command-line arguments for the image-generation demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m" , "--pretrained_model_name_or_path" , type=str , default=None , required=True , help="Path to pretrained model or model identifier from huggingface.co/models." , )
    parser.add_argument(
        "-c" , "--caption" , type=str , default="robotic cat with wings" , help="Text used to generate images." , )
    parser.add_argument(
        "-n" , "--images_num" , type=int , default=4 , help="How many images to generate." , )
    parser.add_argument(
        "-s" , "--seed" , type=int , default=42 , help="Seed for random process." , )
    parser.add_argument(
        "-ci" , "--cuda_id" , type=int , default=0 , help="cuda_id." , )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """Paste `rows * cols` images into a single grid image."""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def __UpperCAmelCase ( __A , __A="robotic cat with wings" , __A=7.5 , __A=5_0 , __A=1 , __A=4_2 , ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = torch.Generator(pipeline.device ).manual_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = pipeline(
SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ , ).images
UpperCAmelCase__ = int(math.sqrt(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = image_grid(SCREAMING_SNAKE_CASE__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
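# Example invocation sketch (script name and model path are illustrative):
#   python text_to_image.py -m ./quantized-sd-model -c "robotic cat with wings" -n 4 -s 42 -ci 0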
| 475 |
'''simple docstring'''
__UpperCamelCase : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: bytes ) -> bytes:
"""simple docstring"""
# Make sure the supplied data is a bytes-like object
if not isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
__a = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
__a = ''.join(bin(SCREAMING_SNAKE_CASE__ )[2:].zfill(8 ) for byte in data )
__a = len(SCREAMING_SNAKE_CASE__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__a = b'=' * ((6 - len(SCREAMING_SNAKE_CASE__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE__ ) % 6)
else:
__a = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(SCREAMING_SNAKE_CASE__ ), 6 ) ).encode()
+ padding
)
def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC 4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
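    # A hedged usage sketch, cross-checked against the standard library:
    import base64 as stdlib_base64

    assert base64_encode(b"Python") == stdlib_base64.b64encode(b"Python")  # b'UHl0aG9u'
    assert base64_decode(b"UHl0aG9u") == b"Python"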
    doctest.testmod()
| 448 | 0 |
"""simple docstring"""
def solution():
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
    print(solution())
| 703 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Evaluate the interpolating polynomial at x0 via Neville's algorithm."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
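    # Example sketch: the points below lie on the line y = x + 5, so the
    # interpolated value at x = 5 should be exactly 10.
    assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10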
    doctest.testmod()
| 285 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
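# Note: _LazyModule defers the torch-backed imports above until an attribute is
# first accessed, so importing this package stays cheap when only the configs
# are needed (an observation about the lazy-import pattern, not new behavior).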
| 473 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
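# Usage sketch (checkpoint id illustrative):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text=transcription, return_tensors="pt")["input_ids"]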
| 473 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 709 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
    def test_image_processor_from_dict_with_kwargs(self):
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
    def test_batch_feature(self):
pass
    def test_call_pil(self):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
lowerCamelCase_ = DeformableDetrImageProcessor()
lowerCamelCase_ = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __SCREAMING_SNAKE_CASE ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __SCREAMING_SNAKE_CASE ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __SCREAMING_SNAKE_CASE ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase_ = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCamelCase_ = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __SCREAMING_SNAKE_CASE ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __SCREAMING_SNAKE_CASE )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __SCREAMING_SNAKE_CASE ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __SCREAMING_SNAKE_CASE ) )
| 137 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    """Cross-attention down block: resnets interleaved with transformer blocks."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
for i in range(self.num_layers ):
UpperCAmelCase__: Any = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase__: Optional[Any] = FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
UpperCAmelCase__: List[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ):
UpperCAmelCase__: str = ()
for resnet, attn in zip(self.resnets , self.attentions ):
UpperCAmelCase__: int = resnet(__snake_case , __snake_case , deterministic=__snake_case )
UpperCAmelCase__: Dict = attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase__: List[Any] = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    """Plain down block: a stack of resnets with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
for i in range(self.num_layers ):
UpperCAmelCase__: Optional[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase__: Tuple = FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ):
UpperCAmelCase__: str = ()
for resnet in self.resnets:
UpperCAmelCase__: Any = resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase__: Any = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    """Cross-attention up block: resnets with skip connections and transformer blocks."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
for i in range(self.num_layers ):
UpperCAmelCase__: Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase__: Dict = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase__: Union[str, Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
UpperCAmelCase__: Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
UpperCAmelCase__: str = res_hidden_states_tuple[-1]
UpperCAmelCase__: Dict = res_hidden_states_tuple[:-1]
UpperCAmelCase__: Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase__: Optional[int] = resnet(__snake_case , __snake_case , deterministic=__snake_case )
UpperCAmelCase__: Tuple = attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
UpperCAmelCase__: Optional[Any] = self.upsamplers_a(__snake_case )
return hidden_states
class FlaxUpBlock2D(nn.Module):
    """Plain up block: resnets with skip connections and an optional upsampler."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
for i in range(self.num_layers ):
UpperCAmelCase__: Any = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase__: int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase__: int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ):
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase__: Dict = res_hidden_states_tuple[-1]
UpperCAmelCase__: Any = res_hidden_states_tuple[:-1]
UpperCAmelCase__: List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase__: Any = resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
UpperCAmelCase__: str = self.upsamplers_a(__snake_case )
return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Mid block: a resnet / transformer / resnet sandwich at the UNet bottleneck."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
# there is always at least one resnet
        resnets = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
        attentions = []
for _ in range(self.num_layers ):
UpperCAmelCase__: List[str] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
UpperCAmelCase__: Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
UpperCAmelCase__: Optional[int] = resnets
UpperCAmelCase__: Optional[int] = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
return hidden_states | 113 |
import numpy as np
def exponential_linear_unit ( vector : np.ndarray , alpha : float ) -> np.ndarray:
    """Exponential Linear Unit: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0 , vector , alpha * (np.exp(vector ) - 1) )
if __name__ == "__main__":
import doctest
doctest.testmod()
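# Quick sanity sketch (sample values are my own, not from the source):
#   exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), alpha=1.0)
#   -> array([-0.86466472,  0.        ,  3.        ])   # exp(-2) - 1 for the negative entry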
| 335 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce TensorFlow log noise
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 516 |
| 516 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ) -> np.ndarray:
    """Build a ksize x ksize Gabor filter kernel."""
    # the kernel size must be odd so the kernel has a well-defined centre
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
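# Small sanity sketch (the arguments are my own, chosen for illustration): an
# even ksize of 2 is bumped to 3, and with psi = 0 the centre of the kernel is
# exp(0) * cos(0) = 1:
#   k = gabor_filter_kernel(2, sigma=8, theta=0, lambd=10, gamma=0, psi=0)
#   k.shape    # (3, 3)
#   k[1, 1]    # 1.0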
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 517 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
SCREAMING_SNAKE_CASE_ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 517 | 1 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays( nums1: list , nums2: list ) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2 )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
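# Worked examples (my own): median_of_two_arrays([1, 3], [2]) merges to
# [1, 2, 3] and returns 2; median_of_two_arrays([1, 2], [3, 4]) has an even
# total length, so it averages the middle pair and returns 2.5.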
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 717 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder( nn.Module ):
    def __init__(self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward(self , x ):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset( Dataset ):
    def __init__(self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__(self : List[str] ) -> Dict:
return len(self.data )
    def __getitem__(self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies (self ):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
        return label_freqs
def collate_fn( batch ):
    """Pad each sample's text to the batch max length and stack all tensors."""
    lens = [len(row["sentence"] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms( ):
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
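# Minimal wiring sketch for the pieces above (the file name, tokenizer choice,
# and hyper-parameters are illustrative assumptions, not taken from the source):
#
#   from torch.utils.data import DataLoader
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)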
| 459 | 0 |
import requests
def send_slack_message( message_body , slack_url ):
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            'Request to slack returned an error '
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
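# Illustrative call (the webhook URL below is a placeholder; real Slack
# webhooks are issued per channel and must be kept secret):
#   send_slack_message("Nightly build passed", "https://hooks.slack.com/services/T000/B000/XXX")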
| 101 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate (item , main_target ):
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover (parent_1 , parent_2 ):
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate (child , genes ):
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select (parent_1 , population_score , genes ):
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
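# Worked example of the child-count rule in `select` (numbers are my own): a
# parent with fitness 0.25 asks for int(0.25 * 100) + 1 = 26 children, which
# the cap then reduces to 10, so every reasonably fit parent spawns 10 children.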
def basic (target , genes , debug = True ):
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("""""".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation , population , target = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 465 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = "▁"
_a : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_full_tokenizer( self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_85_36, 22_60, 1_01]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self ):
# fmt: off
__UpperCAmelCase : List[Any] = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 10 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( number: int ) -> int:
    """Return the value of the Möbius function for ``number``."""
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
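# Worked examples (my own): mobius(6) == 1 since 6 = 2 * 3 has an even number
# of distinct prime factors; mobius(30) == -1 (three factors); mobius(12) == 0
# because 12 = 2**2 * 3 is not square-free.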
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor( ProcessorMixin ):
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__a : str = kwargs.pop('raw_speech' )
else:
__a : Union[str, Any] = kwargs.pop('audio' , snake_case_ )
__a : List[str] = kwargs.pop('sampling_rate' , snake_case_ )
__a : int = kwargs.pop('text' , snake_case_ )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        '''simple docstring'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
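# Minimal usage sketch (the checkpoint name is a real public one, but the
# waveform below is a made-up placeholder array):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   batch = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcript", return_tensors="pt")["input_ids"]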
| 47 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu( x ):
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new( x ):
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish( x ):
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( x ):
    x = tf.convert_to_tensor(x )
    coeff_1 = tf.cast(0.044715 , x.dtype )
    coeff_2 = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_2 * (1.0 + coeff_1 * x * x) ))
def quick_gelu( x ):
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_10( x ):
    # clip the erf-based gelu to [-10, 10]
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu( x , axis=-1 ):
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def approximate_gelu_wrap( x ):
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
'''gelu''': gelu,
    '''gelu_10''': gelu_10,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def get_tf_activation( activation_string ):
    '''simple docstring'''
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}' )
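# Usage sketch (the tensor values are made up): look an activation up by name
# from the dict above and apply it like any callable.
#   act = get_tf_activation("gelu_fast")
#   y = act(tf.constant([-1.0, 0.0, 1.0]))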
| 236 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock( nn.Module ):
'''simple docstring'''
    def __init__( self , dim , num_attention_heads , attention_head_dim , dropout=0.0 , cross_attention_dim = None , activation_fn = "geglu" , num_embeds_ada_norm = None , attention_bias = False , only_cross_attention = False , double_self_attention = False , upcast_attention = False , norm_elementwise_affine = True , norm_type = "layer_norm" , final_dropout = False , ):
        '''simple docstring'''
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward( self , chunk_size , dim ):
        '''simple docstring'''
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward( self , hidden_states , attention_mask = None , encoder_hidden_states = None , encoder_attention_mask = None , timestep = None , cross_attention_kwargs = None , class_labels = None , ):
        '''simple docstring'''
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
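# Minimal shape sketch for the block above (all sizes are illustrative
# assumptions): the block maps (batch, tokens, dim) to the same shape.
#   block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
#   out = block(torch.randn(2, 64, 320))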
class FeedForward( nn.Module ):
    '''simple docstring'''
    def __init__( self , dim , dim_out = None , mult = 4 , dropout = 0.0 , activation_fn = "geglu" , final_dropout = False , ):
        '''simple docstring'''
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate='tanh' )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward( self , hidden_states ):
        '''simple docstring'''
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class GELU( nn.Module ):
    '''simple docstring'''
    def __init__( self , dim_in , dim_out , approximate = "none" ):
        '''simple docstring'''
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu( self , gate ):
        '''simple docstring'''
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        '''simple docstring'''
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class GEGLU( nn.Module ):
    '''simple docstring'''
    def __init__( self , dim_in , dim_out ):
        '''simple docstring'''
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu( self , gate ):
        '''simple docstring'''
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        '''simple docstring'''
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class ApproximateGELU( nn.Module ):
    '''simple docstring'''
    def __init__( self , dim_in , dim_out ):
        '''simple docstring'''
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward( self , x ):
        '''simple docstring'''
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm( nn.Module ):
    '''simple docstring'''
    def __init__( self , embedding_dim , num_embeddings ):
        '''simple docstring'''
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward( self , x , timestep ):
        '''simple docstring'''
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class AdaLayerNormZero( nn.Module ):
    '''simple docstring'''
    def __init__( self , embedding_dim , num_embeddings ):
        '''simple docstring'''
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1E-6 )
    def forward( self , x , timestep , class_labels , hidden_dtype=None ):
        '''simple docstring'''
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm( nn.Module ):
    '''simple docstring'''
    def __init__( self , embedding_dim , out_dim , num_groups , act_fn = None , eps = 1E-5 ):
        '''simple docstring'''
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward( self , x , emb ):
        '''simple docstring'''
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
return x | 692 |
'''simple docstring'''
def longest_distance( graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
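# For the adjacency list above, the longest path is 0 -> 2 -> 5 -> 6 -> 7,
# which visits 5 vertices, so the call below prints 5.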
longest_distance(graph) | 692 | 1 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
class ByT5Tokenizer( PreTrainedTokenizer ):
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_=125 , UpperCamelCase_=None , **UpperCamelCase_ , ):
if extra_ids > 0 and additional_special_tokens is None:
__magic_name__ = [f'''<extra_id_{i}>''' for i in range(__UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__magic_name__ = len(set(filter(lambda UpperCamelCase_ : bool('''extra_id''' in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
''' extra_ids tokens''' )
__magic_name__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
__magic_name__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
__magic_name__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
super().__init__(
eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size( self ):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ''' eos tokens being added.''' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def _tokenize( self , text ):
        tokens = [chr(i ) for i in text.encode('''utf-8''' )]
        return tokens
    def _convert_token_to_id( self , token ):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def _convert_id_to_token( self , index ):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def convert_tokens_to_string( self , tokens ):
        bstring = B''''''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('''utf-8''' )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode('''utf-8''' )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('''utf-8''' )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('''utf-8''' )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode('''utf-8''' , errors='''ignore''' )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        return ()
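# Round-trip sketch (the sample text is my own): every UTF-8 byte becomes one
# id, offset by the 3 special tokens, and the eos id (1) is appended:
#   tok = ByT5Tokenizer()
#   tok("hi")["input_ids"]   # -> [107, 108, 1], since ord("h") + 3 == 107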
| 490 |
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any( num: int , base: int ) -> str:
    '''simple docstring'''
    if isinstance(num , float ):
        raise TypeError("""int() can't convert non-string with explicit base""" )
    if num < 0:
        raise ValueError("""parameter must be positive int""" )
    if isinstance(base , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if isinstance(base , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if base in (0, 1):
        raise ValueError("""base must be >= 2""" )
    if base > 36:
        raise ValueError("""base must be <= 36""" )
    new_value = """"""
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
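# Worked examples (my own): decimal_to_any(255, 16) emits "F" for each
# remainder of 15 and returns "FF"; decimal_to_any(9, 2) returns "1001".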
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 566 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_lilt"""] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 701 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self : int ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self : Union[str, Any] ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self : Optional[Any] ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self : Optional[int] ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 25 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__snake_case : Union[str, Any] = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source : float, target : float ) -> bool:
    """Return True when `source` is within 1% of `target`."""
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Any =_TestCommandArgs(dataset=__snake_case, all_configs=__snake_case, save_infos=__snake_case )
A__ : Dict =TestCommand(*__snake_case )
test_command.run()
A__ : Tuple =os.path.join(__snake_case, """README.md""" )
assert os.path.exists(__snake_case )
A__ : str =DatasetInfosDict.from_directory(__snake_case )
A__ : str =DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ), splits=[
{
"""name""": """train""",
"""num_bytes""": 2_351_563,
"""num_examples""": 10_000,
},
{
"""name""": """validation""",
"""num_bytes""": 238_418,
"""num_examples""": 1_000,
},
], download_size=3_940_680, dataset_size=2_589_981, )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["""default"""], key ), getattr(expected_dataset_infos["""default"""], key )
        if key == "num_bytes":
            assert is_apercent_close(result, expected )
        elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes )
else:
            assert result == expected
| 215 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case : Dict = logging.getLogger()
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A__ : int =argparse.ArgumentParser()
parser.add_argument("""-f""" )
A__ : int =parser.parse_args()
return args.f
class lowerCamelCase ( TestCasePlus ):
'''simple docstring'''
    def setup( self : int ) -> None:
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self : Optional[Any] , args : Any ) -> int:
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""" )
            with patch.object(sys , """argv""" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self : List[Any] ) -> None:
        '''simple docstring'''
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args )
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args )
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args )
| 215 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key (old_name : str , num_meta4D_last_stage : int ):
    """Translate an original EfficientFormer state-dict key into the HF naming scheme."""
    new_name = old_name
    if "patch_embed" in old_name:
        layer = old_name.split('.' )[1]
        if layer == "0":
            new_name = old_name.replace('0' , 'convolution1' )
        elif layer == "1":
            new_name = old_name.replace('1' , 'batchnorm_before' )
        elif layer == "3":
            new_name = old_name.replace('3' , 'convolution2' )
        else:
            new_name = old_name.replace('4' , 'batchnorm_after' )
    if "network" in old_name and re.search(R'\d\.\d' , old_name ):
        two_digit_num = R'\b\d{2}\b'
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R'\d\.\d\d.' , old_name ).group()
        else:
            match = re.search(R'\d\.\d.' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '' )
            trimmed_name = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1' , 'layernorm1' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2' , 'layernorm2' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1' , 'linear_in' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2' , 'linear_out' )
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(R'.\d.' , old_name ):
        new_name = old_name.replace('network' , 'intermediate_stages' )
    if "fc" in new_name:
        new_name = new_name.replace('fc' , 'convolution' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1' , 'batchnorm_before' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2' , 'batchnorm_after' )
    if "proj" in new_name:
        new_name = new_name.replace('proj' , 'projection' )
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head' , 'distillation_classifier' )
    elif "head" in new_name:
        new_name = new_name.replace('head' , 'classifier' )
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm' , 'layernorm' )
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
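# Illustrative traces (added): following `rename_key` by hand on two stem
# parameters shows how patch-embedding keys are renamed:
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "patch_embed.1.bias"   -> "efficientformer.patch_embed.batchnorm_before.bias"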
def convert_torch_checkpoint (checkpoint : Any , num_meta4D_last_stage : int ):
    """Rename every key of the original checkpoint in place and return it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img ():
    """Load the standard COCO test image used for verification."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint (checkpoint_path : Path , efficientformer_config_file : Path , pytorch_dump_path : Path , push_to_hub : bool ):
    """Convert an original EfficientFormer checkpoint to the HF format and verify the outputs."""
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings['bicubic'] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(f"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print('Pushing model to the hub...' )
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
__lowerCAmelCase = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
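# Example invocation (added; the script and file names below are placeholders,
# not shipped checkpoints):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path converted_efficientformer_l1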
| 713 |
'''simple docstring'''
def least_divisible_repunit (divisor : int ) -> int:
    """Return the length of the smallest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution (limit : int = 1_000_000 ) -> int:
    """Return the least odd divisor whose smallest repunit has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
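    # Illustrative sanity checks (added; values are easy to verify by hand):
    # 111 is the first repunit divisible by 3, and 111111 = 7 * 15873 is the
    # first repunit divisible by 7.
    assert least_divisible_repunit(3) == 3
    assert least_divisible_repunit(7) == 6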
| 319 | 0 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : List[str] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : Dict , **__lowerCamelCase : Any ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : int ) -> Any:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Any , *__lowerCamelCase : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Any ) -> str:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : List[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : str , *__lowerCamelCase : str , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : str , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : str , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Any , **__lowerCamelCase : str ) -> int:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : str , *__lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Any , *__lowerCamelCase : str , **__lowerCamelCase : str ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Any , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[int] ) -> int:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : List[str] , *__lowerCamelCase : str , **__lowerCamelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Dict , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : str , *__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : Any , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : Dict , *__lowerCamelCase : int , **__lowerCamelCase : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Any , *__lowerCamelCase : Any , **__lowerCamelCase : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : List[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : str , *__lowerCamelCase : Dict , **__lowerCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : Any , *__lowerCamelCase : Any , **__lowerCamelCase : int ) -> int:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : str , *__lowerCamelCase : str , **__lowerCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['flax'] )
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
def __init__( self : Dict , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def UpperCAmelCase ( cls : str , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['flax'] )
| 375 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> None:
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop( self ) -> None:
        '''simple docstring'''
        debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
    def test_metric_cpu_multi( self ) -> None:
        '''simple docstring'''
        debug_launcher(self.test_metrics.main )
@require_single_gpu
    def test_metric_gpu( self ) -> None:
        '''simple docstring'''
        self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi( self ) -> None:
        '''simple docstring'''
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 42 | 0 |
"""simple docstring"""
def decimal_to_binary ( num ) -> str:
    """Convert an integer to its binary string representation (e.g. 8 -> "0b1000")."""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary : list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 706 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__snake_case : Tuple = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path ,targets ):
    """Extract warnings from a downloaded artifact (a .zip file, or a plain directory when `from_gh` is set)."""
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line ,bytes ):
                line = line.decode("UTF-8" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" " ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = "\n".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path ,filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings
def extract_warnings( output_dir ,targets ):
    """Extract warnings from all artifact files found under `output_dir`."""
    selected_warnings = set()
    paths = [os.path.join(output_dir ,p ) for p in os.listdir(output_dir ) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p ,targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ) -> list:
        return values.split("," )
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
__snake_case : Tuple = parser.parse_args()
__snake_case : Any = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__snake_case : str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__snake_case : Tuple = extract_warnings(args.output_dir, args.targets)
__snake_case : List[str] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
    json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 615 | 0
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
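# Note on the pattern above (added): registering a `_LazyModule` in
# `sys.modules` keeps importing this package cheap; names listed in
# `_import_structure` are only resolved on first attribute access, e.g.
#     from <this package> import AutoformerConfig
# triggers the real import of `configuration_autoformer` at that point.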
| 77 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
UpperCamelCase__ : List[Any] = None
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ : List[str] = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
UpperCamelCase__ : Dict = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
UpperCamelCase__ : str = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by a SentencePiece model."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
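# Illustrative usage (added; "google/fnet-base" is the checkpoint referenced in
# the maps above):
#
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     ids = tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
#     # -> [cls_id, 5, 6, sep_id, 7, 8, sep_id], with token_type_ids
#     #    [0, 0, 0, 0, 1, 1, 1] from create_token_type_ids_from_sequences.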
| 591 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline(Pipeline ):
    def __init__( self , *args , **kwargs):
        '''simple docstring'''
        super().__init__(*args , **kwargs)
        requires_backends(self , """vision""")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__( self , images , **kwargs):
        '''simple docstring'''
        return super().__call__(images , **kwargs)
    def _sanitize_parameters( self , **kwargs):
        '''simple docstring'''
        return {}, {}, {}
    def preprocess( self , image):
        '''simple docstring'''
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs
    def _forward( self , model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output)).astype("""uint8""")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
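# Illustrative usage (added; a minimal sketch assuming the standard pipeline
# factory and a depth checkpoint from the Hub such as "Intel/dpt-large"):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")   # PIL image built in postprocess()
#     result["predicted_depth"].shape     # raw model output tensor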
| 495 |
lowerCamelCase__ : str = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ) -> bool:
    '''Return True if the sink `t` is reachable from `s` in the residual graph.'''
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph , source , sink ) -> list:
    '''Run Ford-Fulkerson and return the saturated edges of a minimum cut.'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
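    # Illustrative note (added): `test_graph` is the classic CLRS flow network
    # with source 0 and sink 5; its maximum flow is 23, and the call above
    # prints the minimum-cut edges [(1, 3), (4, 3), (4, 5)], whose capacities
    # 12 + 7 + 4 = 23 match the max flow, as the max-flow min-cut theorem requires.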
| 495 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig( PretrainedConfig ):
    model_type = 'mgp-str'
    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50_257 , num_wordpiece_labels=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 668 |
"""simple docstring"""
def dodecahedron_surface_area( edge : float ) -> float:
    if edge <= 0 or not isinstance(edge, (int, float) ):
        raise ValueError("Length must be a positive." )
    return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge : float ) -> float:
    if edge <= 0 or not isinstance(edge, (int, float) ):
        raise ValueError("Length must be a positive." )
    return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
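    # Illustrative values (added): for edge = 1 the closed forms give
    # surface area = 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457 and
    # volume = (15 + 7 * sqrt(5)) / 4 ~= 7.6631.
    print(dodecahedron_surface_area(1))
    print(dodecahedron_volume(1))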
| 572 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenAIGPTModel,
            '''text-classification''': OpenAIGPTForSequenceClassification,
            '''text-generation''': OpenAIGPTLMHeadModel,
            '''zero-shot''': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 711 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''spiece.model'''}
_SCREAMING_SNAKE_CASE : int = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
_SCREAMING_SNAKE_CASE : List[Any] = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 1
_SCREAMING_SNAKE_CASE : int = 2
_SCREAMING_SNAKE_CASE : str = 3
_SCREAMING_SNAKE_CASE : str = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''''''.join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
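# --- Hedged usage sketch (not part of the original module) ---
# Shows the left-padding XLNet tokenizer on a sentence pair; loading the
# published `xlnet-base-cased` checkpoint is an assumption for illustration.
if __name__ == "__main__":
    tokenizer = UpperCAmelCase__.from_pretrained("xlnet-base-cased")
    encoded = tokenizer("Hello world", "How are you?")
    # Sequences end with <sep> ... <cls>, and segment ids end with the CLS id 2.
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])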
| 472 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text( text: str , n: int = 100 , character: str = " " ):
    '''simple docstring'''
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents( documents: dict ):
    '''simple docstring'''
    titles , texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text'''] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents: dict , ctx_encoder: DPRContextEncoder , ctx_tokenizer: DPRContextEncoderTokenizerFast ):
    '''simple docstring'''
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args: "RagExampleArguments" , processing_args: "ProcessingArguments" , index_hnsw_args: "IndexHnswArguments" , ):
'''simple docstring'''
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
"""simple docstring"""
    csv_path: str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments:
"""simple docstring"""
    num_proc: Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size: int = field(
        default=16 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class IndexHnswArguments:
"""simple docstring"""
    d: int = field(
        default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m: int = field(
        default=128 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
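        # --- Hedged sketch (not in the original script): querying the saved index. ---
        # Reloads the passages + FAISS index written by main() and retrieves the
        # nearest passages for a question; the question-encoder checkpoint named
        # here is an assumption, not something the script above pins down.
        from datasets import load_from_disk
        from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

        ds = load_from_disk(os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"))
        ds.load_faiss_index("embeddings", os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss"))
        q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        question = rag_example_args.question or "What does Moses' rod turn into ?"
        question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
        scores, retrieved = ds.get_nearest_examples("embeddings", question_emb, k=5)
        print(retrieved["title"])  # titles of the top-5 retrieved passages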
| 97 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
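# --- Hedged sanity check (not part of the original script) ---
# Reload the freshly saved checkpoint and confirm the embedding matrix matches
# the tokenizer's vocabulary size; assumes the local save above succeeded.
reloaded = AutoModelForCausalLM.from_pretrained(args.model_name)
assert reloaded.get_input_embeddings().weight.shape[0] == len(tokenizer)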
| 681 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __a ( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_gradient_accumulator( self ):
        '''simple docstring'''
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(lowerCAmelCase__ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def test_gradient_accumulator_distribution( self ):
        '''simple docstring'''
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
@tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
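# --- Hedged usage sketch (not part of the original test file) ---
# A minimal gradient-accumulation step: accumulate per-micro-batch gradients,
# apply them once, then reset. Mirrors the API exercised by the tests above.
if __name__ == "__main__" and is_tf_available():
    accumulator = GradientAccumulator()
    variable = tf.Variable([1.0, 2.0])
    optimizer, _ = create_optimizer(init_lr=5e-5, num_train_steps=10, num_warmup_steps=5)
    for micro_grad in ([0.1, 0.1], [0.3, -0.1]):
        accumulator([tf.constant(micro_grad)])  # accumulates in place
    optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
    accumulator.reset()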
| 335 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def snake_case_ ( snake_case = "" ) -> dict[str, float]:
lowercase__: Any = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
lowercase__: Optional[Any] = BeautifulSoup(requests.get(snake_case ).text , 'html.parser' )
lowercase__: Optional[int] = soup.find_all('td' , attrs='titleColumn' )
lowercase__: Optional[int] = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(snake_case , snake_case )
}
def snake_case_ ( snake_case = "IMDb_Top_250_Movies.csv" ) -> None:
lowercase__: Optional[Any] = get_imdb_top_aaa_movies()
with open(snake_case , 'w' , newline='' ) as out_file:
lowercase__: Optional[Any] = csv.writer(snake_case )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
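    # --- Hedged alternative (not in the original script) ---
    # The helpers can also be used directly, e.g. to print the chart instead of
    # writing a CSV; requires network access to imdb.com.
    # for title, rating in get_imdb_top_aaa_movies().items():
    #     print(f"{rating}\t{title}")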
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 219 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
def _lowercase ( self : Dict ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
def _lowercase ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file , "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , __A )
snake_case__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def _lowercase ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : int = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _lowercase ( cls : str ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _lowercase ( cls : str ):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def _lowercase ( self : Tuple ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir , repo_id="test-tokenizer" , push_to_hub=True , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _lowercase ( self : Dict ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir , repo_id="valid_org/test-tokenizer-org" , push_to_hub=True , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _lowercase ( self : List[str] ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[Any] = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Optional[int] = CustomTokenizer(__A )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Any = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Optional[Any] = BertTokenizerFast.from_pretrained(__A )
bert_tokenizer.save_pretrained(__A )
snake_case__ : Union[str, Any] = CustomTokenizerFast.from_pretrained(__A )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case__ : int = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__A , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ):
        trie = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _lowercase ( self : Union[str, Any] ):
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def _lowercase ( self : Union[str, Any] ):
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def _lowercase ( self : List[str] ):
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : Dict ):
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : List[str] ):
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def _lowercase ( self : Optional[Any] ):
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def _lowercase ( self : Optional[int] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ["AB", "C"] )
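# --- Hedged usage sketch (not part of the original test file) ---
# Trie is the helper tokenizers use to split raw text on added special tokens
# in a single pass; this mirrors the behaviour exercised above.
if __name__ == "__main__":
    trie = Trie()
    trie.add("[CLS]")
    trie.add("extra_id_100")
    print(trie.split("[CLS] This is a extra_id_100"))  # ['[CLS]', ' This is a ', 'extra_id_100']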
| 297 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class snake_case__ (ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad( self , *args , **kwargs ):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("input_features" , None )
        labels = kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
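# --- Hedged usage sketch (not part of the original module) ---
# Pairs a feature extractor and tokenizer to prepare audio inputs and text
# labels in one call; the checkpoint name is an illustrative assumption.
if __name__ == "__main__":
    import numpy as np

    processor = snake_case__.from_pretrained("facebook/wav2vec2-base-960h")
    speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
    batch = processor(audio=speech, sampling_rate=16000, text="HELLO", return_tensors="pt")
    print(batch["input_values"].shape, batch["labels"])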
| 701 |
def _modexpt( base: int, exponent: int, modulo_value: int ) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value )) % modulo_value
def solution( base: int = 1777, height: int = 1855, digits: int = 8 ) -> int:
    '''simple docstring'''
    # evaluate the tetration base ^ base ^ ... (height times), keeping only
    # the last `digits` digits at every step
    result = base
    for _ in range(1, height ):
        result = _modexpt(base, result, 10**digits )
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
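    # --- Hedged sanity check (not part of the original solution) ---
    # _modexpt agrees with Python's built-in three-argument pow, and solution()
    # returns the last 8 digits of the hyperexponentiation 1777 ↑↑ 1855
    # (Project Euler problem 188).
    assert _modexpt(3, 20, 10**8) == pow(3, 20, 10**8)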
| 662 | 0 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class lowerCAmelCase_ ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '--accelerate-config_file' ,default=None ,help='The accelerate config file to use for the default values in the launching script.' ,)
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self ,accelerate_config_file ,*args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        safetensors_version = 'not installed'
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('safetensors' ) is not None:
            import safetensors
            safetensors_version = F'{safetensors.__version__} but is ignored because of PyTorch version too old.'
        accelerate_version = 'not installed'
        accelerate_config = accelerate_config_str = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config ,dict )
                else F'\t{accelerate_config}'
            )
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU' ) )
        flax_version = 'not installed'
        jax_version = 'not installed'
        jaxlib_version = 'not installed'
        jax_backend = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F'{safetensors_version}',
'Accelerate version': F'{accelerate_version}',
'Accelerate config': F'{accelerate_config_str}',
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'Tensorflow version (GPU?)': F'{tf_version} ({tf_cuda_available})',
'Flax version (CPU?/GPU?/TPU?)': F'{flax_version} ({jax_backend})',
'Jax version': F'{jax_version}',
'JaxLib version': F'{jaxlib_version}',
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d ):
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 91 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __magic_name__ ( BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 242 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 508 |
'''simple docstring'''
import socket
def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    with open('Received_file' ,'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
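# --- Hedged counterpart sketch (not part of the original script) ---
# A matching one-shot server that sends a local file to the client above;
# the filename is an illustrative assumption.
def serve_file(filename: str = "File_to_send.txt") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 1_23_12))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(10_24))  # b'Hello server!'
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(10_24):
            conn.send(chunk)
    conn.close()
    server.close()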
if __name__ == "__main__":
main()
| 508 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = '''lower newer'''
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs( self , *args , **kwargs ):
        """simple docstring"""
        pass
    def test_padding( self , max_length=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )
    def test_padding_if_pad_token_set_slow( self ):
        """simple docstring"""
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        s = '''This is a simple input'''
        s2 = ['''This is a simple input looooooooong''', '''This is a simple input''']
        p = ('''This is a simple input''', '''This is a pair''')
        p2 = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        out_sa = tokenizer(s2 , padding=True , truncate=True , return_tensors='''np''' )
        out_p = tokenizer(*p , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        out_pa = tokenizer(p2 , padding=True , truncate=True , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow( self ):
        """simple docstring"""
        bos_token = '''$$$'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = '''This is a simple input'''
        s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(s2 )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def test_truncation( self ):
        """simple docstring"""
        tokenizer = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        text = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
        expected_truncated_text = '''\nif len_a > len_b: result = a\nelse: result = b'''
        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text , expected_truncated_text )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
pass
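# --- Hedged usage sketch (not part of the original test file) ---
# `truncate_before_pattern` lets CodeGen's decode stop at completion
# boundaries (comments, docstrings, blank lines), as exercised above.
if __name__ == "__main__":
    tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# trailing comment")
    print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))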
| 598 |
'''simple docstring'''
def find_min( arr ):
    """Partition `arr` into two subsets whose sums are as close as possible
    and return the minimum difference (classic subset-sum DP)."""
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i items sums to j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # either skip item i (dp[i-1][j]) or take it
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best achievable subset sum j <= s/2 minimizes s - 2j
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
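# --- Hedged worked example (not part of the original file) ---
# For [1, 6, 11, 5] the best split is {1, 5, 6} vs {11}, so the minimum
# partition difference is |12 - 11| = 1.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1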
| 309 | 0 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 318 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch( tf_checkpoint_path: str , config_path: str , pytorch_dump_path: str ) -> None:
    """simple docstring"""
    def get_masked_lm_array(name: str ):
        full_name = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name: str ):
        full_name = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index: int , name: str ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index: int , name: str , original_shape ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(F'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '''_attention_layer_norm/gamma''' )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '''_attention_layer_norm/beta''' )
# Intermediate
lowercase__ = layer.intermediate
lowercase__ = get_encoder_layer_array(lowercase , '''_intermediate_dense/kernel''' )
lowercase__ = get_encoder_layer_array(lowercase , '''_intermediate_dense/bias''' )
# Output
lowercase__ = layer.output
lowercase__ = get_encoder_layer_array(lowercase , '''_output_dense/kernel''' )
lowercase__ = get_encoder_layer_array(lowercase , '''_output_dense/bias''' )
lowercase__ = get_encoder_layer_array(lowercase , '''_output_layer_norm/gamma''' )
lowercase__ = get_encoder_layer_array(lowercase , '''_output_layer_norm/beta''' )
# Embeddings
lowercase__ = get_encoder_array('''_position_embedding_layer/embeddings''' )
lowercase__ = get_encoder_array('''_type_embedding_layer/embeddings''' )
lowercase__ = get_encoder_array('''_embedding_norm_layer/gamma''' )
lowercase__ = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
lowercase__ = model.cls.predictions.transform
lowercase__ = get_masked_lm_array('''dense/kernel''' )
lowercase__ = get_masked_lm_array('''dense/bias''' )
lowercase__ = get_masked_lm_array('''layer_norm/gamma''' )
lowercase__ = get_masked_lm_array('''layer_norm/beta''' )
lowercase__ = get_masked_lm_array('''embedding_table''' )
# Pooling
lowercase__ = BertPooler(config=lowercase )
lowercase__ = get_encoder_array('''_pooler_layer/kernel''' )
lowercase__ = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(lowercase )
# Integration test - should load without any errors ;)
lowercase__ = BertForMaskedLM.from_pretrained(lowercase )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
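# Added for clarity (not part of the original script): why every "kernel"
# array above is transposed. TF's keras Dense layer stores its kernel as
# (in_features, out_features), while torch.nn.Linear keeps weight as
# (out_features, in_features). The shapes below are illustrative only.
def _kernel_transpose_demo():
    import numpy as np

    tf_kernel = np.zeros((768, 3072))  # hypothetical TF dense kernel: (in, out)
    pt_weight = tf_kernel.transpose()  # layout torch.nn.Linear expects: (out, in)
    assert pt_weight.shape == (3072, 768)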
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 318 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent(self):
        pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical(self):
        pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 278 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
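# For reference (added note, not part of this script): after conversion these
# defaults live in the saved config.json, so a plain `generate()` call picks
# them up implicitly. Hypothetical usage sketch:
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#     tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")
#     model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-ru-en")
#     batch = tokenizer("Машинное обучение - это здорово", return_tensors="pt")  # "Machine learning is great"
#     out = model.generate(**batch)  # uses num_beams=5, length_penalty=1.1 from the config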
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word breaking symbol, (2) add the word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
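# A minimal sketch of what `rewrite_dict_keys` does (toy vocab, not real data):
# fairseq marks word-internal BPE pieces with a trailing "@@", whereas the FSMT
# tokenizer marks word endings with "</w>", and special tokens stay untouched.
def _rewrite_dict_keys_demo():
    d = {"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    assert rewrite_dict_keys(d) == {
        "le": 5, "tt": 6, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3
    }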
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
UpperCAmelCase__ = basename(__A )
UpperCAmelCase__ = dirname(__A )
UpperCAmelCase__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
UpperCAmelCase__ = cls.hub_models()
UpperCAmelCase__ = {"bpe": "fastbpe", "tokenizer": "moses"}
UpperCAmelCase__ = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
UpperCAmelCase__ = hub_utils.from_pretrained(
__A , __A , __A , archive_map=__A , **__A )
UpperCAmelCase__ = vars(chkpt["args"]["model"] )
UpperCAmelCase__ = args["source_lang"]
UpperCAmelCase__ = args["target_lang"]
UpperCAmelCase__ = dirname(__A )
UpperCAmelCase__ = basename(__A )
# dicts
UpperCAmelCase__ = os.path.join(__A , F"""dict.{src_lang}.txt""" )
UpperCAmelCase__ = os.path.join(__A , F"""dict.{tgt_lang}.txt""" )
UpperCAmelCase__ = Dictionary.load(__A )
UpperCAmelCase__ = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase__ = len(__A )
UpperCAmelCase__ = os.path.join(__A , "vocab-src.json" )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
UpperCAmelCase__ = True
for k in src_vocab.keys():
if not k.islower():
UpperCAmelCase__ = False
break
UpperCAmelCase__ = Dictionary.load(__A )
UpperCAmelCase__ = rewrite_dict_keys(tgt_dict.indices )
UpperCAmelCase__ = len(__A )
UpperCAmelCase__ = os.path.join(__A , "vocab-tgt.json" )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# merges_file (bpecodes)
UpperCAmelCase__ = os.path.join(__A , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
UpperCAmelCase__ = os.path.join(__A , __A )
if os.path.exists(__A ):
break
with open(__A , encoding="utf-8" ) as fin:
UpperCAmelCase__ = fin.read()
UpperCAmelCase__ = re.sub(R" \d+$" , "" , __A , 0 , re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(__A , "w" , encoding="utf-8" ) as fout:
fout.write(__A )
# model config
UpperCAmelCase__ = os.path.join(__A , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args["tokenizer"]}"""
UpperCAmelCase__ = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
UpperCAmelCase__ = 5
UpperCAmelCase__ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
UpperCAmelCase__ = best_score_hparams[model_dir]["length_penalty"]
else:
UpperCAmelCase__ = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# tokenizer config
UpperCAmelCase__ = os.path.join(__A , __A )
UpperCAmelCase__ = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1_0_2_4,
"do_lower_case": do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# model
UpperCAmelCase__ = chkpt["models"][0]
UpperCAmelCase__ = model.state_dict()
# rename keys to start with 'model.'
UpperCAmelCase__ = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
UpperCAmelCase__ = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(__A , __A )
UpperCAmelCase__ = FSMTConfig.from_pretrained(__A )
UpperCAmelCase__ = FSMTForConditionalGeneration(__A )
# check that it loads ok
model_new.load_state_dict(__A , strict=__A )
# save
UpperCAmelCase__ = os.path.join(__A , __A )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__A , __A )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 475 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[str]:
UpperCAmelCase : int = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Any = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = tmp_path / '''cache'''
UpperCAmelCase : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : str = features.copy() if features else default_expected_features
UpperCAmelCase : Optional[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase )
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Optional[int]:
UpperCAmelCase : Optional[int] = tmp_path / '''cache'''
UpperCAmelCase : int = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : Any = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase )
UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase )
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : Any = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : List[str] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
UpperCAmelCase : List[str] = iter_sql_file(_lowerCAmelCase )
UpperCAmelCase : Any = iter_sql_file(_lowerCAmelCase )
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = tmp_path / '''cache'''
UpperCAmelCase : Tuple = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
with pytest.raises(_lowerCAmelCase ):
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
| 719 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU: x * Phi(x), computed with the exact error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the variant popularized by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10] (useful for quantization)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input into halves a, b and return a * sigmoid(b)."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
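# Small self-contained sanity check (added for illustration; sample points are
# arbitrary): the tanh approximation should track the exact erf-based GELU to
# within ~1e-3, and the registry should hand back a matching callable.
def _gelu_sanity_check():
    x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0])
    exact = _gelu(x)
    approx = _gelu_new(x)
    tf.debugging.assert_near(exact, approx, atol=1e-3)
    tf.debugging.assert_near(get_tf_activation("gelu")(x), exact, atol=1e-3)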
| 528 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n   author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n  title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n  booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n  month = {November},\n  year = {2020},\n  address = {Online},\n  publisher = {Association for Computational Linguistics},\n  pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n    title = "{COMET}: A Neural Framework for {MT} Evaluation",\n    author = "Rei, Ricardo  and\n      Stewart, Craig  and\n      Farinha, Ana C  and\n      Lavie, Alon",\n    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n    month = nov,\n    year = "2020",\n    address = "Online",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n    pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n    `scores`: List of scores.\n\nExamples:\n\n    >>> comet_metric = datasets.load_metric(\'comet\')\n    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 99 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
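# Tiny worked example of the retrieval metric above (random toy vectors, not
# real sentence embeddings): when both sides hold identical vectors, every row
# retrieves itself at cosine distance zero, so precision@10 must be exactly 1.0.
def _precision_at_10_demo():
    rng = np.random.default_rng(0)
    en = rng.normal(size=(12, 4))
    assert precision_at_10(en, en.copy()) == 1.0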
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 566 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower * upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
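# Quick self-check (added illustration; the matrix is a standard textbook
# example): the two factors should multiply back to the input.
def _lu_demo():
    a = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(a)
    assert np.allclose(lower @ upper, a)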
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 712 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes, collecting 2 plus the odd primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: longest sum of consecutive primes below `ceiling` that is itself prime."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
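# Small illustrative check of the sieve helper (running the full `solution()`
# below takes noticeably longer):
def _prime_sieve_demo():
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]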
if __name__ == "__main__":
    print(f'{solution() = }')
| 298 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__snake_case = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 |
"""simple docstring"""
import copy
import re
class lowerCAmelCase_ :
'''simple docstring'''
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple:
A = prefix
A = defaults
cls.build_naming_info()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int:
if len(A_ ) == 0:
return ""
A = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(A_ ) + 1 ):
A = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A_ : Optional[Any] ):
A = ''
while integer != 0:
A = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
A = 0
while True:
A = word + '#' + int_to_alphabetic(A_ )
if sword in info["reverse_short_word"]:
continue
else:
A = sword
break
A = short_word
A = word
return short_word
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]:
A = param_name.split('_' )
A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
A = ['', '_']
for separator in separators:
A = separator.join(A_ )
if shortname not in info["reverse_short_param"]:
A = shortname
A = param_name
return shortname
return param_name
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple:
A = TrialShortNamer.shortname_for_key(A_ ,A_ )
A = short_name
A = param_name
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]:
if cls.NAMING_INFO is not None:
return
A = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
A = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(A_ ,A_ )
A = info
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
A = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A = cls.NAMING_INFO['short_param'][k]
if isinstance(A_ ,A_ ):
A = 1 if v else 0
A = '' if isinstance(A_ ,(int, float) ) else '-'
A = F'{key}{sep}{v}'
name.append(A_ )
return "_".join(A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int:
A = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A = []
else:
A = repr.split('_' )
A = {}
for value in values:
if "-" in value:
A , A = value.split('-' )
else:
A = re.sub('[0-9.]' ,'' ,A_ )
A = float(re.sub('[^0-9.]' ,'' ,A_ ) )
A = cls.NAMING_INFO['reverse_short_param'][p_k]
A = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A = cls.DEFAULTS[k]
return parameters | 91 | 0 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data) -> None:
        # Each slot holds a deque so that colliding keys chain together.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self) -> float:
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 423 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 423 | 1 |
"""simple docstring"""
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-indexed line whose base**exponent is largest."""
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        # Compare x * log10(a) instead of a**x to avoid astronomically large numbers.
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
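# Why the log comparison works (added sketch with small, made-up numbers):
# a**x > b**y exactly when x*log10(a) > y*log10(b), and the logarithms stay
# tiny even when the powers themselves would have hundreds of digits.
def _log_comparison_demo():
    assert 3**7 > 2**11
    assert 7 * log10(3) > 11 * log10(2)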
if __name__ == "__main__":
print(solution())
| 510 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : int=64 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Any=5_12 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def _UpperCamelCase ( self : Dict ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[Any] ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = MPNetModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = MPNetForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MPNetForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = MPNetForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MPNetForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Any = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
a_ : Optional[Any] = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[Any] = False
a_ : Any = True
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = MPNetModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@require_torch
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = MPNetModel.from_pretrained('microsoft/mpnet-base' )
lowerCamelCase__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 510 | 1 |
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Initialize an empty row x column matrix filled with default_value."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Return a readable string representation of this matrix."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        """Check whether loc is a valid (row, column) location inside this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """
        Apply the Sherman-Morrison formula: given this matrix as A^(-1), compute
        (A + uv^T)^(-1) in O(n^2), or return None if A + uv^T is not invertible.
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
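
# --- Illustrative cross-check (an addition, not part of the original file) ---
# The method above implements the Sherman-Morrison identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# where `self` plays the role of A^(-1). A minimal sketch that verifies the
# Matrix implementation against numpy.linalg.inv; it assumes numpy is
# installed and reuses the same u, v as test1 with A = I.
def _sherman_morrison_numpy_check() -> None:
    import numpy as np

    a_inv = Matrix(3, 3, 0)
    for i in range(3):
        a_inv[i, i] = 1  # A = A^(-1) = I for simplicity
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5

    result = a_inv.sherman_morrison(u, v)
    expected = np.linalg.inv(np.eye(3) + np.outer([1, 2, -3], [4, -2, 5]))
    for r in range(3):
        for c in range(3):
            assert abs(result[r, c] - expected[r][c]) < 1e-9


if __name__ == "__main__":
    _sherman_morrison_numpy_check()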
| 629 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serializes this instance to a Python dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
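
# --- Illustrative usage (an addition, not part of the original file) ---
# A minimal sketch of the round trip that `to_dict` above enables: the nested
# backbone config is serialized recursively, so the dict can be fed back into
# `from_dict`. Because of the relative imports above, this is meant to run in
# a separate script with `transformers` installed, not by executing this file.
if __name__ == "__main__":
    from transformers import UperNetConfig

    config = UperNetConfig()  # falls back to a default ResNet backbone
    as_dict = config.to_dict()
    assert as_dict["backbone_config"]["model_type"] == "resnet"

    restored = UperNetConfig.from_dict(as_dict)
    assert restored.hidden_size == config.hidden_size == 512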
| 629 | 1 |
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
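
# --- Illustrative follow-up (an addition, not part of the original script) ---
# A minimal sketch of how counts like these are typically consumed for MLM
# masking: frequent tokens are down-weighted with a smoothing exponent before
# sampling which positions to mask. The 0.7 default mirrors the word2vec-style
# smoothing mentioned in the argparse description above; treat the exact
# exponent and function name as assumptions.
import numpy as np


def smoothed_token_probs(counts: list, smoothing: float = 0.7) -> np.ndarray:
    freqs = np.maximum(np.array(counts, dtype=np.float64), 1.0)  # guard against 0 ** -smoothing
    probs = freqs**-smoothing
    return probs / probs.sum()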
| 81 |
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
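
# --- Illustrative usage (an addition, not part of the original test file) ---
# A minimal standalone sketch of the schema behavior exercised above: the
# first record fixes the columns, so later records missing a column get None.
# It only assumes the `datasets` library is installed.
if __name__ == "__main__":
    from datasets import Dataset

    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(dset.column_names)  # ['col_1']
    print(dset[1])            # {'col_1': None}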
| 94 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
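
# --- Illustrative sketch (an addition, not part of the original file) ---
# The pattern above defers heavy imports until a name is first accessed. A
# minimal, self-contained illustration of the same idea using importlib,
# independent of transformers' _LazyModule implementation:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Loads a submodule only when one of its exported names is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(module_name)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Example: `TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1})` only
# imports the json module on first use of `dumps`.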
| 709 |
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """A convolution block bundling a conv layer, batch norm and a ReLU activation."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) as used in PSPNet."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Decode head implementing Unified Perceptual Parsing
    ([UPerNet](https://arxiv.org/abs/1807.10221)).
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Auxiliary head based on Fully Convolutional Networks for Semantic
    Segmentation ([FCN](https://arxiv.org/abs/1411.4038)).
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
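
# --- Illustrative usage (an addition, not part of the original file) ---
# A minimal inference sketch for the model above. The checkpoint name comes
# from the archive list at the top of the file; using a random tensor instead
# of a preprocessed image is an assumption to keep the sketch self-contained.
# Meant to run in a separate script with `transformers` and `torch` installed.
if __name__ == "__main__":
    import torch
    from transformers import UperNetForSemanticSegmentation

    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    pixel_values = torch.randn(1, 3, 512, 512)  # stand-in for a preprocessed image
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values)
    print(outputs.logits.shape)  # (1, num_labels, 512, 512): upsampled to input size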
| 478 | 0 |
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
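
# --- Worked example (an addition, not part of the original file) ---
# Two resistors of 4 and 12 ohms: in series Req = 4 + 12 = 16 ohms; in
# parallel Req = 1 / (1/4 + 1/12) = 1 / (1/3) = 3 ohms.
if __name__ == "__main__":
    assert resistor_series([4, 12]) == 16
    assert abs(resistor_parallel([4, 12]) - 3.0) < 1e-12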
| 21 |
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
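
# --- Illustrative variation (an addition, not part of the original snippet) ---
# A minimal sketch for reproducible sampling: passing a seeded generator makes
# the same prompt yield the same image across runs. The seed value is
# arbitrary; `generator` is part of the diffusers pipeline call signature.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seed0.png")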
| 307 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
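
# --- Illustrative usage (an addition, not part of the original file) ---
# A minimal sketch, assuming this module ships inside `datasets` (the relative
# imports above prevent running the file directly) and that the task templates
# are exposed under `datasets.tasks` as in older library versions:
#
#     from datasets.tasks import LanguageModeling
#
#     template = LanguageModeling(text_column="content")
#     assert template.column_mapping == {"content": "text"}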
| 400 |
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
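
# --- Illustrative sketch (an addition, not part of the original test file) ---
# The `interpolate_pos_encoding=True` path exercised above lets a ViT trained
# at one resolution accept another: position embeddings for the patch grid are
# resized on the fly. A standalone check of the sequence-length arithmetic
# (pure Python, no TF needed):
def vit_seq_length(image_size: int, patch_size: int) -> int:
    # one embedding per patch, plus the [CLS] token
    return (image_size // patch_size) ** 2 + 1


assert vit_seq_length(30, 2) == 226  # tester default above
assert vit_seq_length(15, 2) == 50   # halved image in create_and_check_model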
| 400 | 1 |
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Return integer (x, y) pixel coordinates of shape (width * height, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
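
# --- Illustrative usage (an addition, not part of the original file) ---
# A minimal sketch of the ray layout produced above: for a 64x64 pan of 20
# camera poses, `camera_rays` stacks one (origin, direction) pair per pixel.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    # shape: (batch, views * H * W, 2, 3) -> (1, 20 * 64 * 64, 2, 3)
    print(rays.shape)
    directions = rays[:, :, 1, :]
    print(float(directions.norm(dim=-1).mean()))  # ~1.0: directions are unit vectors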
| 424 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
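
# --- Illustrative sketch (an addition, not part of the original test file) ---
# The local binary pattern computed above compares each pixel with its 8
# neighbors and packs the comparisons into a byte. A tiny self-contained
# version of the idea (the clockwise neighbor ordering here is an assumption;
# the project's `local_binary_value` may order bits differently):
def tiny_lbp_value(window: np.ndarray) -> int:
    """window: 3x3 array; returns an LBP code for the center pixel."""
    center = window[1, 1]
    # 8 neighbors, clockwise from the top-left corner
    neighbors = [window[0, 0], window[0, 1], window[0, 2], window[1, 2],
                 window[2, 2], window[2, 1], window[2, 0], window[1, 0]]
    return sum(int(p >= center) << bit for bit, p in enumerate(neighbors))


assert tiny_lbp_value(np.array([[9, 9, 9], [0, 5, 0], [0, 0, 0]])) == 0b00000111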
| 424 | 1 |
import numpy as np

from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin


class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
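
# --- Illustrative usage (an addition, not part of the original file) ---
# A minimal sketch of the `pad` behavior this mixin exercises, using a
# concrete extractor. Wav2Vec2FeatureExtractor is an assumption; any
# SequenceFeatureExtractor subclass behaves the same way. Meant to run in a
# separate script with `transformers` installed.
if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    speech_inputs = [[0.1] * 800, [0.1] * 1000]  # two "waveforms" of unequal length

    batch = feat_extract.pad({"input_values": speech_inputs}, padding="longest", return_tensors="np")
    print(batch["input_values"].shape)  # (2, 1000): the shorter input is padded with 0.0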
| 153 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
_lowercase = StableDiffusionDiffEditPipeline
_lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
_lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
_lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase = frozenset([] )
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_zero=__UpperCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE_ : str =CLIPTextModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Tuple ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
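# Inputs for the full edit call: a random 16x16 mask plus precomputed image latents.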
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : Any =floats_tensor((1, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Any =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
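# Inputs for generate_mask: source and target prompts over a small random RGB image.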
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : Dict =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : List[Any] =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
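# Inputs for invert, with decode_latents=True so the call returns decoded images instead of latents.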
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
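# Optional components (safety_checker, feature_extractor) must stay None through a save/load round trip and still produce matching outputs.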
def __lowerCamelCase ( self ):
if not hasattr(self.pipeline_class , '_optional_components' ):
return
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =pipe(**__UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pipeline_class.from_pretrained(__UpperCAmelCase )
pipe_loaded.to(__UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__UpperCAmelCase , __UpperCAmelCase ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe_loaded(**__UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : str =np.abs(output - output_loaded ).max()
self.assertLess(__UpperCAmelCase , 1E-4 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] ='cpu'
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_mask_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
SCREAMING_SNAKE_CASE_ : str =np.array([0] * 9 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int ='cpu'
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =self.get_dummy_inversion_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.invert(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : str =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE_ : Tuple =np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
SCREAMING_SNAKE_CASE_ : int =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int ='cpu'
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Dict ={'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE_ : str =DPMSolverMultistepScheduler(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =DPMSolverMultistepInverseScheduler(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inversion_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =pipe.invert(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
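# Integration tests on stabilityai/stable-diffusion-2-1: generate a mask, invert the latents, then edit a bowl of fruit into pears.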
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowerCamelCase ( cls ):
SCREAMING_SNAKE_CASE_ : Tuple =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE_ : Any =raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE_ : Optional[int] =raw_image
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Dict =DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] ='a bowl of fruit'
SCREAMING_SNAKE_CASE_ : Optional[int] ='a bowl of pears'
SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe.invert(
prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase ).latents
SCREAMING_SNAKE_CASE_ : List[Any] =pipe(
prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str ='a bowl of fruit'
SCREAMING_SNAKE_CASE_ : str ='a bowl of pears'
SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe.invert(
prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe(
prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE_ : List[str] =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 153 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
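# CPU-provider checks of the ONNX Stable Diffusion pipeline across schedulers and prompt-embedding code paths.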
class UpperCAmelCase__ ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def __lowercase ( self : str ,_a : Tuple=0 ):
'''simple docstring'''
_a : str = np.random.RandomState(_a )
_a : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_a )
_a : Tuple = self.get_dummy_inputs()
_a : List[Any] = pipe(**_a ).images
_a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a : int = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_a : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=_a )
pipe.set_progress_bar_config(disable=_a )
_a : Tuple = self.get_dummy_inputs()
_a : int = pipe(**_a ).images
_a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a : Tuple = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_a : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_a : List[Any] = self.get_dummy_inputs()
_a : Optional[Any] = pipe(**_a ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a : int = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = self.get_dummy_inputs()
_a : Tuple = pipe(**_a ).images
_a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a : Tuple = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_a : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_a : Optional[Any] = self.get_dummy_inputs()
_a : Union[str, Any] = pipe(**_a ).images
_a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a : Union[str, Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_a : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_a : str = self.get_dummy_inputs()
_a : Optional[int] = pipe(**_a ).images
_a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a : Dict = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = self.get_dummy_inputs()
_a : Tuple = 3 * [inputs['prompt']]
# forward
_a : Any = pipe(**_a )
_a : List[str] = output.images[0, -3:, -3:, -1]
_a : Optional[Any] = self.get_dummy_inputs()
_a : List[Any] = 3 * [inputs.pop('prompt' )]
_a : Dict = pipe.tokenizer(
_a ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=_a ,return_tensors='np' ,)
_a : List[str] = text_inputs['input_ids']
_a : List[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_a : str = prompt_embeds
# forward
_a : List[str] = pipe(**_a )
_a : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_a )
_a : List[str] = self.get_dummy_inputs()
_a : int = 3 * ['this is a negative prompt']
_a : Any = negative_prompt
_a : Union[str, Any] = 3 * [inputs['prompt']]
# forward
_a : Optional[Any] = pipe(**_a )
_a : List[Any] = output.images[0, -3:, -3:, -1]
_a : List[Any] = self.get_dummy_inputs()
_a : str = 3 * [inputs.pop('prompt' )]
_a : int = []
for p in [prompt, negative_prompt]:
_a : str = pipe.tokenizer(
_a ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=_a ,return_tensors='np' ,)
_a : Optional[int] = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_a, _a = embeds
# forward
_a : Optional[int] = pipe(**_a )
_a : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
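# Nightly GPU runs of the exported ONNX pipeline, including a per-step callback and a safety-checker-free save/load check.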
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = ort.SessionOptions()
_a : str = False
return options
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=_a )
_a : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
_a : Optional[int] = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type='np' )
_a : Union[str, Any] = output.images
_a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[Any] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
_a : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=_a )
_a : Any = 'open neural network exchange'
_a : List[Any] = np.random.RandomState(0 )
_a : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_a ,output_type='np' )
_a : Optional[Any] = output.images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a : List[Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
_a : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=_a )
_a : List[Any] = 'open neural network exchange'
_a : Union[str, Any] = np.random.RandomState(0 )
_a : str = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_a ,output_type='np' )
_a : Optional[Any] = output.images
_a : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a : List[Any] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = 0
def test_callback_fn(_a : int ,_a : int ,_a : np.ndarray ) -> None:
_a : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_a : Tuple = latents[0, -3:, -3:, -1]
_a : Union[str, Any] = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_a : Optional[int] = latents[0, -3:, -3:, -1]
_a : Optional[Any] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
_a : List[str] = False
_a : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=_a )
_a : str = 'Andromeda galaxy in a bottle'
_a : Optional[Any] = np.random.RandomState(0 )
pipe(
prompt=_a ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=_a ,callback=_a ,callback_steps=1 ,)
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
assert isinstance(_a ,_a )
assert pipe.safety_checker is None
_a : Union[str, Any] = pipe('example prompt' ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
_a : Any = OnnxStableDiffusionPipeline.from_pretrained(_a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a : Any = pipe('example prompt' ,num_inference_steps=2 ).images[0]
assert image is not None
| 229 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
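# Task template that maps dataset columns onto the canonical "text"/"labels" schema for text classification.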
@dataclass(frozen=True )
class UpperCAmelCase__ ( TaskTemplate ):
"""simple docstring"""
__UpperCAmelCase : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__UpperCAmelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
__UpperCAmelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
__UpperCAmelCase : str = "text"
__UpperCAmelCase : str = "labels"
def __lowercase ( self : int ,_a : Optional[int] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] ,_a ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
_a : Optional[int] = copy.deepcopy(self )
_a : Optional[Any] = self.label_schema.copy()
_a : int = features[self.label_column]
_a : Any = label_schema
return task_template
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 229 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
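# ByT5 ships only a tokenizer, loaded lazily on first access.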
snake_case_ : Optional[int] = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
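# Builds tiny BlipTextConfig inputs and checks TFBlipTextModel output shapes.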
class snake_case_ :
'''simple docstring'''
def __init__( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : List[str]=12 , __magic_name__ : int=7 , __magic_name__ : str=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=99 , __magic_name__ : str=32 , __magic_name__ : Optional[Any]=32 , __magic_name__ : int=2 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : int=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Any=512 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : str=0 , __magic_name__ : Dict=None , ) -> Optional[Any]:
lowerCamelCase_ : List[str] = parent
lowerCamelCase_ : Union[str, Any] = batch_size
lowerCamelCase_ : int = seq_length
lowerCamelCase_ : Optional[int] = is_training
lowerCamelCase_ : str = use_input_mask
lowerCamelCase_ : str = use_labels
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : Optional[Any] = hidden_size
lowerCamelCase_ : str = projection_dim
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : str = num_attention_heads
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : Optional[int] = dropout
lowerCamelCase_ : str = attention_dropout
lowerCamelCase_ : List[Any] = max_position_embeddings
lowerCamelCase_ : Dict = initializer_range
lowerCamelCase_ : Optional[Any] = scope
lowerCamelCase_ : List[str] = bos_token_id
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : int = None
if self.use_input_mask:
lowerCamelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase_ : List[Any] = input_mask.numpy()
lowerCamelCase_ , lowerCamelCase_ = input_mask.shape
lowerCamelCase_ : int = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__magic_name__ ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 0
lowerCamelCase_ : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Any:
lowerCamelCase_ : Union[str, Any] = TFBlipTextModel(config=__magic_name__ )
lowerCamelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , training=__magic_name__ )
lowerCamelCase_ : Dict = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
lowerCamelCase_ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( TFModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFBlipTextModel,) if is_tf_available() else ()
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
lowerCamelCase_ : List[str] = BlipTextModelTester(self )
lowerCamelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : List[Any] = TFBlipTextModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Dict=True ) -> Union[str, Any]:
super().test_pt_tf_model_equivalence(allow_missing_keys=__magic_name__ )
| 253 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
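# LXMERT import structure: config and tokenizer always; fast tokenizer, PyTorch, and TF models behind availability guards.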
snake_case__ : Optional[int] = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : str = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
snake_case__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 392 |
from ..utils import DummyObject, requires_backends
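# Placeholder that raises a clear error at instantiation or class-method use when the onnx backend is missing.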
class snake_case ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase__ : Any = ["onnx"]
def __init__( self : Tuple , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ) ->Any:
'''simple docstring'''
requires_backends(self , ["""onnx"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Dict ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""onnx"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""onnx"""] )
| 392 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
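# Integration tests for the ONNX Stable Diffusion inpainting pipeline with the default and LMS schedulers.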
class UpperCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = ort.SessionOptions()
_lowerCamelCase = False
return options
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
_lowerCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A red cat sitting on a park bench'''
_lowerCamelCase = np.random.RandomState(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images
_lowerCamelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowerCamelCase = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
_lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
_lowerCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A red cat sitting on a park bench'''
_lowerCamelCase = np.random.RandomState(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images
_lowerCamelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowerCamelCase = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 638 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
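# A 1D UNet: Fourier or positional time embeddings feeding down/mid/up 1D blocks, with an optional output head.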
@dataclass
class UpperCamelCase ( BaseOutput ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
| 638 | 1 |