'''simple docstring'''
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    '''simple docstring'''
    # subtract the per-row max for numerical stability before exponentiating
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead"
                " of `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
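
# Usage sketch (added for illustration; the default checkpoint is resolved by
# the pipeline factory, and the score below is indicative only):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification")
#     classifier("This restaurant is awesome")
#     # -> [{'label': 'POSITIVE', 'score': 0.99...}]
#     classifier("This restaurant is awesome", top_k=None)  # scores for every label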
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    # being late three days in a row or absent twice forfeits the prize,
    # so no prize strings can be formed from this state
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
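
# Sanity check (added for illustration): Project Euler 191 states that of the
# 81 possible trinary strings for a 4-day period, exactly 43 lead to a prize,
# which the recursion above reproduces:
#
#     assert solution(4) == 43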
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: Optional[str] = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: Optional[str] = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
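
# Minimal forward-pass sketch (added for illustration; the sizes are
# assumptions consistent with the defaults above: batch 1, in_channels=2):
#
#     model = UNet1DModel(sample_size=256, block_out_channels=(32, 32, 64))
#     noise = torch.randn(1, 2, 256)
#     out = model(noise, timestep=10).sample  # a (1, 2, 256) denoised sample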
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    '''simple docstring'''
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    '''simple docstring'''

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    '''simple docstring'''

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    '''simple docstring'''

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    '''simple docstring'''

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    '''simple docstring'''
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    '''simple docstring'''
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
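
# Usage sketch (added for illustration; the parameters are stand-ins):
#
#     import torch
#
#     params = [torch.nn.Parameter(torch.randn(2, 2))]
#     optimizer = torch.optim.AdamW(params, lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer=optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     # call optimizer.step() followed by lr_scheduler.step() once per training step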
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    '''simple docstring'''
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
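
# Invocation sketch (added for illustration; the script filename, tokenizer
# name and data directory are placeholders). `fire.Fire` maps the CLI
# arguments onto `save_len_file`'s parameters:
#
#     python save_len_file.py t5-small /path/to/data --consider_target True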
'''simple docstring'''
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)

# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}

MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}

MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}

MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}

MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}

MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}

MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}

TOP_LEVEL_KEYS = []
IGNORE_KEYS = []


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    '''simple docstring'''
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
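
# Invocation sketch (added for illustration; the script filename and paths are
# placeholders):
#
#     python convert_encodec_checkpoint_to_pytorch.py \
#         --model encodec_24khz \
#         --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec_24khz_converted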
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available

if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # Each sequence is padded to length 102 with the pad id 58100; the long
        # padding runs are written with list arithmetic for readability.
        expected_encoding = {
            "input_ids": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [58100] * 71,
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [58100] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
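
# Running these tests (added for illustration; the path is an assumption based
# on the usual transformers test layout, and the integration tests only run
# with RUN_SLOW=1):
#
#     RUN_SLOW=1 python -m pytest tests/models/marian/test_tokenization_marian.py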
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
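
# Usage sketch (added for illustration): a config built with the defaults above
# fully determines a randomly initialised model.
#
#     configuration = BeitConfig()
#     # model = BeitModel(configuration)  # BeitModel lives alongside this config
#     # configuration = model.config      # round-trip access to the config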
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
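
# Worked examples (added for illustration), matching Python's built-in bin():
#
#     decimal_to_binary(0)    # '0b0'
#     decimal_to_binary(40)   # '0b101000'
#     decimal_to_binary(-40)  # '-0b101000'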
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel

MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    '''simple docstring'''
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    '''simple docstring'''
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    '''simple docstring'''
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    '''simple docstring'''
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    '''simple docstring'''
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    '''simple docstring'''
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    '''simple docstring'''
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    '''simple docstring'''
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_ema = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_ema.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion_ema, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
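
# Invocation sketch (added for illustration; the script filename is an
# assumption). Passing an official model name downloads the checkpoint first:
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k \
#         --checkpoint_path ./gwf-440k-diffusers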
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase :List[Any] = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCamelCase :int = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op in a TF saved model is either an ONNX op for the given opset or an internal TF op."""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 714 |
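# Example invocation (script name and model path are illustrative):
#   python check_saved_model_ops.py --saved_model_path saved_model.pb --opset 12 --strict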
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials with success probability `prob`.

    >>> binomial_distribution(2, 4, 0.75)
    0.2109375
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print("Probability of 2 successes out of 4 trials")
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
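    # Hand check of the value printed above:
    # C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375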
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Sanity-check a user-supplied device_map against the model's number of attention blocks."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
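# Minimal usage sketch (layer count and device ids are illustrative):
#   get_device_map(6, [0, 1]) -> {0: [0, 1, 2], 1: [3, 4, 5]}
#   assert_device_map({0: [0, 1, 2], 1: [3, 4, 5]}, 6) passes silently.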
'''simple docstring'''
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
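    # Note: this computes the complement only; the *reverse* complement of "ATCG"
    # would be dna("ATCG")[::-1] == "CGAT".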
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Optional[Any] = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
    parser.add_argument(
        "--push_to_hub",
        default=True,
        # argparse's `type=bool` would treat "False" as True; parse explicitly instead.
        type=lambda x: str(x).lower() == "true",
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 716 |
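# Example invocation (script name and output path are placeholders):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./resnet50_dump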
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
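# With this layout, e.g. `from transformers.models.xlnet import XLNetModel` only triggers
# the heavy framework imports on first attribute access, via the `_LazyModule` proxy above.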
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
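# For one example, `preprocess` returns a flat dict of statistics, e.g.
# {"hash": ..., "line_mean": ..., "line_max": ..., "alpha_frac": ..., "ratio": ...,
#  "autogenerated": ..., "config_or_test": ..., "has_no_keywords": ..., "has_few_assignments": ...}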
def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index (MinHashLSH); the min_hash is used to query
        closest matches and update the duplicate clusters.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset, based on MinHashLSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
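# Quick illustration: get_tokens("def add(a, b): return a + b") yields
# {"def", "add", "a", "b", "return"}, and two identical snippets have a
# jaccard_similarity of 1.0.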
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Find a reduced cluster of "extremes": every file in the cluster is within
    `jaccard_threshold` similarity of at least one extreme, and each extreme
    tracks how many files it stands in for via its "copies" count."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared in parallel, sharing the dataset between processes via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Near-deduplicate a dataset: cluster near-duplicates with MinHashLSH, keep only the cluster extremes."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
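# Typical use, assuming `ds` is a `datasets.Dataset` with a "content" column:
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)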
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
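# A test requesting `dataset_loading_script_dir` receives a tmp directory
# `datasets/__dummy_dataset1__/` containing `__dummy_dataset1__.py`, ready to
# be handed to `datasets.load_dataset`.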
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """
    Gives the Euclidean distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies `point` via the k-nearest neighbours algorithm.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 0 |
'''simple docstring'''
def valid_coloring(neighbours, colored_vertices, color):
    """
    For the current vertex, check that no already-colored neighbour uses `color`.
    """
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    """
    Recursively try to color the graph, backtracking when a color fails.
    """
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    """
    Returns a coloring of `graph` (an adjacency matrix) using at most
    `max_colors` colors, or an empty list if none exists.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
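if __name__ == "__main__":
    # Minimal demo: a 4-cycle is bipartite, so it is 2-colorable.
    cycle = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
    print(color(cycle, 2))  # e.g. [0, 1, 0, 1]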
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each candidate label is posed as an NLI hypothesis
    against the input sequence, and the entailment score is used as the label score.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
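# Minimal usage sketch (the checkpoint name is an example; any NLI model works):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this movie", candidate_labels=["positive", "negative"])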
'''simple docstring'''
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Kadane's algorithm: maximum sum of a contiguous sub-array."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    result = array.solve_sub_array()
    print("the result is:", result)
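    # Example: for the input "1,-2,3,4,-1" the maximum contiguous sum is 3 + 4 = 7.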
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
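# Minimal usage sketch:
#   config = YolosConfig(num_detection_tokens=100)
#   config.model_type  # "yolos"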
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
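# Worked example of the formula above, using the docstring pairs:
# "this is the prediction" vs "this is the reference" has S=1, D=0, I=0 over
# N=4 reference words; the second pair contributes S=2, I=1 over N=4, so
# WER = (1 + 3) / (4 + 4) = 0.5.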
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCamelCase :Any = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _lowerCAmelCase ( snake_case_ ):
__SCREAMING_SNAKE_CASE : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def _a (self ):
A_ : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(lowercase , lowercase ):
A_ : List[Any] = v.to_dict()
        return d
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=3 , lowercase=None , lowercase=2 , ):
A_ : Optional[Any] = parent
A_ : List[Any] = batch_size
A_ : Any = image_size
A_ : List[str] = patch_size
A_ : Dict = num_channels
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_size
A_ : int = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = intermediate_size
A_ : Any = hidden_act
A_ : Dict = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = scope
A_ : Any = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A_ : int = (image_size // patch_size) ** 2
A_ : Tuple = num_patches + 2
def _a (self ):
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Any = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Tuple = self.get_config()
return config, pixel_values, labels
def _a (self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = DeiTModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
A_ : List[Any] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[int] = DeiTForMaskedImageModeling(config=lowercase__ )
model.to(lowercase__ )
model.eval()
A_ : Optional[int] = model(lowercase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : str = 1
A_ : List[str] = DeiTForMaskedImageModeling(lowercase__ )
model.to(lowercase__ )
model.eval()
A_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[Any] = model(lowercase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[int] = self.type_sequence_label_size
A_ : int = DeiTForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
A_ : Any = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Any = 1
A_ : Union[str, Any] = DeiTForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
A_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Dict = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_, A_, A_ : str = config_and_inputs
A_ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = False
def _a (self ):
A_ : List[str] = DeiTModelTester(self )
A_ : List[str] = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(lowercase__ )
A_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : str = [*signature.parameters.keys()]
A_ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase__ )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__ )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
def _a (self , lowercase , lowercase , lowercase=False ):
A_ : Tuple = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a (self ):
if not self.model_tester.is_training:
return
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : Optional[Any] = model_class(lowercase__ )
model.to(lowercase__ )
model.train()
A_ : Optional[int] = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
A_ : List[Any] = model(**lowercase__ ).loss
loss.backward()
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Union[str, Any] = False
A_ : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : Any = model_class(lowercase__ )
model.gradient_checkpointing_enable()
model.to(lowercase__ )
model.train()
A_ : List[Any] = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
A_ : Union[str, Any] = model(**lowercase__ ).loss
loss.backward()
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase__ ),
*get_values(lowercase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
A_ : int = problem_type["title"]
A_ : Union[str, Any] = problem_type["num_labels"]
A_ : List[Any] = model_class(lowercase__ )
model.to(lowercase__ )
model.train()
A_ : int = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if problem_type["num_labels"] > 1:
A_ : Optional[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Optional[int] = inputs["labels"].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase__ ) as warning_list:
A_ : Any = model(**lowercase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def _a (self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = DeiTModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def a ( ):
'''simple docstring'''
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _a (self ):
A_ : str = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
lowercase__ )
A_ : Tuple = self.default_image_processor
A_ : Dict = prepare_img()
A_ : Dict = image_processor(images=lowercase__ , return_tensors="""pt""" ).to(lowercase__ )
# forward pass
with torch.no_grad():
A_ : List[str] = model(**lowercase__ )
# verify the logits
A_ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
A_ : Dict = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _a (self ):
A_ : str = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : List[str] = self.default_image_processor
A_ : str = prepare_img()
A_ : str = image_processor(images=lowercase__ , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(lowercase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[str] = model(lowercase__ )
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class _lowerCAmelCase :
def __init__(self , lowercase=None , lowercase=None ):
A_ : Tuple = list(poly_a or [0] )[:]
A_ : List[Any] = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
A_ : Any = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
A_ : Tuple = len(self.polyB )
# Add 0 to make lengths equal a power of 2
A_ : Tuple = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
A_ : List[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
A_ : Optional[int] = self.__multiply()
def _a (self , lowercase ):
A_ : Dict = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(lowercase ) <= 1:
return dft[0]
        # Bottom-up Cooley-Tukey: each butterfly pass halves the number of rows
A_ : Dict = self.c_max_length // 2
while next_ncol > 0:
A_ : Optional[int] = [[] for i in range(lowercase )]
A_ : int = self.root**next_ncol
# First half of next step
A_ : Tuple = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowercase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
A_ : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowercase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
A_ : List[str] = new_dft
A_ : Optional[int] = next_ncol // 2
return dft[0]
def _a (self ):
A_ : Any = self.__dft("""A""" )
A_ : str = self.__dft("""B""" )
A_ : Tuple = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
A_ : List[str] = 2
while next_ncol <= self.c_max_length:
A_ : Optional[int] = [[] for i in range(lowercase )]
A_ : Union[str, Any] = self.root ** (next_ncol // 2)
A_ : List[Any] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
A_ : Optional[int] = new_inverse_c
next_ncol *= 2
# Unpack
A_ : Optional[int] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ):
        A_ : Optional[Any] = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        A_ : Optional[int] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        A_ : Any = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return F'{a}\n{b}\n{c}'
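# Minimal usage sketch (illustrative, not part of the original file; the class is
# registered under the obfuscated name `_lowerCAmelCase`, and the obfuscated `A_`
# assignments above are assumed to bind the attributes the methods reference):
def _example_multiplication():
    # (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3 by plain convolution;
    # the class computes the same product in O(n log n) via pointwise DFT products.
    example = _lowerCAmelCase(poly_a=[1, 2, 3], poly_b=[4, 5])
    return example.product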
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
A_ : Any = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
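# Worked trace (small illustrative case): all_construct("ab", ["a", "b", "ab"])
# seeds table[0] = [[]]; word "a" fills table[1] = [["a"]] and word "ab" fills
# table[2] = [["ab"]]; from table[1], word "b" appends ["b", "a"] to table[2].
# After the final reversal the result is [["ab"], ["a", "b"]].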
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :Dict = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :int = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : Union[str, Any] = cn.convert_to_negative(__lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__lowercase , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : Optional[int] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : Union[str, Any] = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : Any = canny.canny(__lowercase )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(__lowercase , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Union[str, Any] = conv.img_convolve(__lowercase , __lowercase ).astype(__lowercase )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(__lowercase , 3 ).any()
def a ( ):
'''simple docstring'''
A_ : List[str] = sob.sobel_filter(__lowercase )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(__lowercase , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : int = bs.Burkes(imread(__lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(__lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(__lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : int = 0
A_ : int = 0
A_ : Tuple = image[x_coordinate][y_coordinate]
A_ : int = lbp.get_neighbors_pixel(
__lowercase , __lowercase , __lowercase , __lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : Optional[int] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : List[Any] = lbp.local_binary_value(__lowercase , __lowercase , __lowercase )
    assert lbp_image.any()
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = []
A_ : int = set({"""(""", """[""", """{"""} )
A_ : Union[str, Any] = set({""")""", """]""", """}"""} )
A_ : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowerCamelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCamelCase__ ) == 0 or (len(lowerCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase__ ) == 0
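# e.g. is_balanced("([]{})") returns True, while is_balanced("([)]") returns
# False because ")" arrives while "[" is still on top of the stack.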
def a ( ):
'''simple docstring'''
A_ : int = input("""Enter sequence of brackets: """ )
if is_balanced(lowerCamelCase__ ):
print(lowerCamelCase__ , """is balanced""" )
else:
print(lowerCamelCase__ , """is not balanced""" )
if __name__ == "__main__":
    main()
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_A ).to(_A )
A_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
A_ : str = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
A_ : int = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
A_ : Union[str, Any] = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
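        # `loss` is the mean negative log-likelihood per target token, so scaling by
        # the target length recovers the sequence-level log-likelihood that the
        # expected score below (presumably from the original Mesh TensorFlow T5
        # implementation, hence `mtf_score`) is compared against.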
A_ : Optional[int] = -(labels.shape[-1] * loss.item())
A_ : Union[str, Any] = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Optional[int] = field
A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : Optional[Any] = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : int = None
A_ : Union[str, Any] = None
A_ : int = None
A_ : List[str] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : str = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
def _a (self ):
A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : List[str] = args
A_ : List[str] = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Any = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
        return written
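# Usage sketch (illustrative): this writer is what `Dataset.to_json` delegates to
# upstream. In this dump both the class and its `write` method carry obfuscated
# names (`_lowerCAmelCase` / `_a`), so the equivalent call here would be:
#   writer = _lowerCAmelCase(dataset, "out.jsonl", num_proc=4)
#   num_bytes_written = writer._a()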
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : List[str]
__SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
__SCREAMING_SNAKE_CASE : ClassVar[str] = "dict"
__SCREAMING_SNAKE_CASE : ClassVar[Any] = None
__SCREAMING_SNAKE_CASE : str = field(default='Translation' , init=__a , repr=__a )
def __call__(self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _a (self ):
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Optional[List] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
__SCREAMING_SNAKE_CASE : ClassVar[str] = "dict"
__SCREAMING_SNAKE_CASE : ClassVar[Any] = None
__SCREAMING_SNAKE_CASE : str = field(default='TranslationVariableLanguages' , init=__a , repr=__a )
def _a (self ):
A_ : List[Any] = sorted(set(self.languages ) ) if self.languages else None
A_ : Dict = len(self.languages ) if self.languages else None
def __call__(self ):
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def _a (self , lowercase ):
A_ : Any = set(self.languages )
if self.languages and set(snake_case__ ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(snake_case__ ) - lang_set ) )}) are not in valid set ({", ".join(snake_case__ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
A_ : Tuple = []
for lang, text in translation_dict.items():
if isinstance(snake_case__ , snake_case__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
A_, A_ : Tuple = zip(*sorted(snake_case__ ) )
return {"language": languages, "translation": translations}
def _a (self ):
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
        }
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __lowerCamelCase ):
def __init__(self , lowercase , lowercase , lowercase ):
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__(self , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = "pil" , lowercase = True , **lowercase , ):
A_ : Optional[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
A_ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
A_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : Optional[int] = {}
if accepts_eta:
A_ : Optional[int] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
A_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
A_ : Optional[Any] = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VAE
A_ : str = self.vqvae.decode(a_ ).sample
A_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
A_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ : Optional[Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
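# Usage sketch (assumption: vqvae/unet/scheduler come from a pretrained
# latent-diffusion checkpoint; the pipeline class above appears under an
# obfuscated name in this dump, so the helper below is illustrative).
def _sample_one_image(pipe, steps=50):
    # draws Gaussian latents, runs `steps` DDIM updates, decodes with the VQ-VAE
    return pipe(batch_size=1, num_inference_steps=steps).images[0]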
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Any = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
    lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase :str = logging.get_logger(__name__)
lowerCamelCase :List[str] = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'marian'
__SCREAMING_SNAKE_CASE : Any = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , lowercase=58101 , lowercase=None , lowercase=1024 , lowercase=12 , lowercase=4096 , lowercase=16 , lowercase=12 , lowercase=4096 , lowercase=16 , lowercase=0.0 , lowercase=0.0 , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=1024 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=58100 , lowercase=False , lowercase=58100 , lowercase=0 , lowercase=0 , lowercase=True , **lowercase , ):
A_ : Optional[Any] = vocab_size
A_ : int = decoder_vocab_size or vocab_size
A_ : Union[str, Any] = max_position_embeddings
A_ : str = d_model
A_ : Tuple = encoder_ffn_dim
A_ : Dict = encoder_layers
A_ : Union[str, Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Optional[Any] = decoder_layers
A_ : str = decoder_attention_heads
A_ : Union[str, Any] = dropout
A_ : Tuple = attention_dropout
A_ : List[Any] = activation_dropout
A_ : str = activation_function
A_ : List[Any] = init_std
A_ : List[str] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Dict = use_cache
A_ : Optional[Any] = encoder_layers
A_ : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Union[str, Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , )
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _a (self ):
if self.task in ["default", "seq2seq-lm"]:
A_ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
A_ : Dict = {0: """batch"""}
A_ : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A_ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
A_ : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A_ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
A_, A_ : Optional[int] = self.num_layers
for i in range(lowercase ):
A_ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
A_ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
else:
A_ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _a (self ):
if self.task in ["default", "seq2seq-lm"]:
A_ : Optional[int] = super().outputs
else:
A_ : List[Any] = super(lowercase , self ).outputs
if self.use_past:
A_, A_ : List[str] = self.num_layers
for i in range(lowercase ):
A_ : str = {0: """batch""", 2: """past_sequence + sequence"""}
A_ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
A_ : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Generate decoder inputs
A_ : str = seq_length if not self.use_past else 1
A_ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : int = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
A_ : Optional[Any] = dict(**lowercase , **lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_, A_ : List[Any] = common_inputs["""input_ids"""].shape
A_ : int = common_inputs["""decoder_input_ids"""].shape[1]
A_, A_ : Dict = self.num_attention_heads
A_ : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : Optional[Any] = decoder_seq_length + 3
A_ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A_ : Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowercase , lowercase )] , dim=1 )
A_ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A_, A_ : Optional[int] = self.num_layers
A_ : Union[str, Any] = min(lowercase , lowercase )
A_ : Any = max(lowercase , lowercase ) - min_num_layers
A_ : str = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase ),
torch.zeros(lowercase ),
torch.zeros(lowercase ),
torch.zeros(lowercase ),
) )
# TODO: test this.
A_ : List[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowercase , lowercase ):
common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
return common_inputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
A_ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_, A_ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A_ : Dict = seqlen + 2
A_, A_ : Optional[int] = self.num_layers
A_, A_ : Optional[int] = self.num_attention_heads
A_ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : str = common_inputs["""attention_mask"""].dtype
A_ : List[str] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
A_ : Optional[int] = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
]
return common_inputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A_ : Optional[int] = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : int = tokenizer.num_special_tokens_to_add(lowercase )
A_ : Optional[Any] = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
# Generate dummy inputs according to compute batch and sequence
A_ : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
A_ : Optional[int] = dict(tokenizer(lowercase , return_tensors=lowercase ) )
return common_inputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
if self.task in ["default", "seq2seq-lm"]:
A_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
else:
A_ : int = self._generate_dummy_inputs_for_causal_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
return common_inputs
def _a (self , lowercase , lowercase , lowercase , lowercase ):
if self.task in ["default", "seq2seq-lm"]:
A_ : str = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
else:
A_ : int = super(lowercase , self )._flatten_past_key_values_(
lowercase , lowercase , lowercase , lowercase )
@property
def _a (self ):
        return 1E-4
| 708 |
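The dummy past-key/value construction in the ONNX config above reduces to shape arithmetic per decoder layer. A standalone sketch with made-up sizes (none taken from a real checkpoint):

import torch

batch, seq_len, d_model, n_heads, n_layers = 2, 8, 1024, 16, 6  # hypothetical sizes

decoder_past_length = seq_len + 3  # mirrors the "+ 3" headroom used above
kv_shape = (batch, n_heads, decoder_past_length, d_model // n_heads)

past_key_values = [
    (torch.zeros(kv_shape), torch.zeros(kv_shape))  # one (key, value) pair per layer
    for _ in range(n_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 11, 64])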
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase :Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def a ( lowerCamelCase__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCamelCase :List[Any] = parser.parse_args()
if args.check_lib:
lowerCamelCase :Union[str, Any] = importlib.import_module('''transformers''')
lowerCamelCase :Union[str, Any] = Path(transformers_module.__file__).parent
else:
lowerCamelCase :List[str] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 686 | 0 |
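The script's core check is a `pathlib` existence scan over a fixed file list; a runnable sketch of the same pattern is below. The file names are placeholders, and the invocation shown in the comment is an assumed usage, not taken from the script.

from pathlib import Path

# e.g. `python check_build.py --check_lib` after an editable install (assumed usage)
required = ["README.md", "setup.py"]  # placeholder list; the real one checks .cu/.cpp/.pyx files
root = Path.cwd()
missing = [name for name in required if not (root / name).exists()]
print("all custom files present" if not missing else f"missing: {missing}")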
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Tuple = logging.get_logger(__name__)
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
A_ : str = 1_28
elif "12-12" in model_name:
A_ : Optional[int] = 12
A_ : str = 12
elif "14-14" in model_name:
A_ : Optional[int] = 14
A_ : Optional[int] = 14
elif "16-16" in model_name:
A_ : str = 16
A_ : Union[str, Any] = 16
else:
raise ValueError("""Model not supported""" )
A_ : List[Any] = """huggingface/label-files"""
if "speech-commands" in model_name:
A_ : Optional[int] = 35
A_ : Optional[Any] = """speech-commands-v2-id2label.json"""
else:
A_ : Optional[Any] = 5_27
A_ : Dict = """audioset-id2label.json"""
A_ : Dict = json.load(open(hf_hub_download(_A , _A , repo_type="""dataset""" ) , """r""" ) )
    A_ : Tuple = {int(k ): v for k, v in idalabel.items()}
A_ : int = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "module.v" in name:
A_ : Any = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
A_ : Dict = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
A_ : str = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
A_ : List[str] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
A_ : Any = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
A_ : List[str] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
A_ : Tuple = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
A_ : Any = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
A_ : Tuple = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A_ : Tuple = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A_ : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A_ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
A_ : Union[str, Any] = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
A_ : Union[str, Any] = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
A_ : List[Any] = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        A_ : Optional[int] = orig_state_dict.pop(key )
if "qkv" in key:
A_ : Union[str, Any] = key.split(""".""" )
A_ : int = int(key_split[3] )
A_ : Tuple = config.hidden_size
if "weight" in key:
A_ : Union[str, Any] = val[:dim, :]
A_ : str = val[dim : dim * 2, :]
A_ : Tuple = val[-dim:, :]
else:
A_ : List[str] = val[:dim]
A_ : Dict = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : Optional[Any] = val
return orig_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
A_ : int = get_audio_spectrogram_transformer_config(_A )
A_ : Optional[Any] = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
A_ : Union[str, Any] = model_name_to_url[model_name]
A_ : Optional[int] = torch.hub.load_state_dict_from_url(_A , map_location="""cpu""" )
# remove some keys
remove_keys(_A )
# rename some keys
A_ : str = convert_state_dict(_A , _A )
# load 🤗 model
A_ : Dict = ASTForAudioClassification(_A )
model.eval()
model.load_state_dict(_A )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
A_ : Dict = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
A_ : Dict = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
A_ : Dict = 10_24 if """speech-commands""" not in model_name else 1_28
A_ : str = ASTFeatureExtractor(mean=_A , std=_A , max_length=_A )
if "speech-commands" in model_name:
A_ : Optional[int] = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
A_ : Dict = dataset[0]["""audio"""]["""array"""]
else:
A_ : Tuple = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
A_, A_ : Tuple = torchaudio.load(_A )
A_ : int = waveform.squeeze().numpy()
A_ : List[str] = feature_extractor(_A , sampling_rate=1_60_00 , return_tensors="""pt""" )
# forward pass
A_ : List[Any] = model(**_A )
A_ : Optional[Any] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
A_ : Tuple = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
A_ : Optional[int] = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
A_ : Union[str, Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
A_ : List[str] = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
A_ : str = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
A_ : int = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
A_ : str = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
A_ : Union[str, Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _A , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_A ).mkdir(exist_ok=_A )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(_A )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
lowerCamelCase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :Union[str, Any] = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 709 |
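After conversion, inference with the pushed checkpoint might look like the sketch below. The checkpoint id mirrors the `MIT/{model_name}` pattern the script pushes to (its availability on the Hub is an assumption), and the random waveform stands in for real audio.

import numpy as np
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

ckpt = "MIT/ast-finetuned-audioset-10-10-0.4593"  # assumed checkpoint name
feature_extractor = ASTFeatureExtractor.from_pretrained(ckpt)
model = ASTForAudioClassification.from_pretrained(ckpt)

waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of dummy audio at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])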
'''simple docstring'''
lowerCamelCase :dict[tuple[int, int, int], int] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : int = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : Optional[int] = _calculate(days - 1 , lowerCamelCase__ , 0 )
A_ : Optional[Any] = state_late + state_absent + state_ontime
A_ : Dict = prizestrings
return prizestrings
def a ( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
if __name__ == "__main__":
    print(solution())
| 686 | 0 |
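The memoised recursion above can equivalently be computed bottom-up by tracking how many valid strings end in each `(absent, late)` state and advancing one day at a time; a minimal sketch:

def prize_strings_dp(days: int = 30) -> int:
    # state[(absent, late)] = number of valid strings ending in that state
    state = {(a, l): 0 for a in range(2) for l in range(3)}
    state[(0, 0)] = 1
    for _ in range(days):
        nxt = dict.fromkeys(state, 0)
        for (absent, late), count in state.items():
            nxt[(absent, 0)] += count                # on time: late streak resets
            if late + 1 < 3:
                nxt[(absent, late + 1)] += count     # late: streak grows, must stay < 3
            if absent + 1 < 2:
                nxt[(absent + 1, 0)] += count        # absent: total count must stay < 2
        state = nxt
    return sum(state.values())

print(prize_strings_dp(30))  # 1918080160, matching _calculate(30, 0, 0) above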
'''simple docstring'''
lowerCamelCase = 0 # The first color of the flag.
lowerCamelCase = 1 # The second color of the flag.
lowerCamelCase = 2 # The third color of the flag.
lowerCamelCase = (red, white, blue)
def a ( lowerCamelCase__ ):
'''simple docstring'''
if not sequence:
return []
if len(A_ ) == 1:
return list(A_ )
A_ : int = 0
A_ : List[Any] = len(A_ ) - 1
A_ : Union[str, Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
A_, A_ : Optional[Any] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
A_, A_ : str = sequence[high], sequence[mid]
high -= 1
else:
A_ : int = f'The elements inside the sequence must contains only {colors} values'
raise ValueError(A_ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = input('''Enter numbers separated by commas:\n''').strip()
lowerCamelCase = [int(item.strip()) for item in user_input.split(''',''')]
print(F"{dutch_national_flag_sort(unsorted)}") | 710 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : Union[str, Any] = rule_str.split(""":""" )
A_ : Union[str, Any] = int(lowerCamelCase__ )
A_ : List[Any] = float(lowerCamelCase__ )
A_ : Union[str, Any] = value
A_ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ):
'''simple docstring'''
A_ : Optional[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : str = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase :List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ):
'''simple docstring'''
A_ : Optional[Any] = SchedulerType(lowerCamelCase__ )
A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
        lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
| 686 | 0 |
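Every factory above returns a step-to-multiplier function that `LambdaLR` applies to the optimizer's base learning rate. A pure-Python sketch of the linear-with-warmup curve, with illustrative step counts:

def linear_with_warmup(step, num_warmup_steps=10, num_training_steps=100):
    # Ramp from 0 to 1 over the warmup, then decay linearly back to 0.
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

for step in (0, 5, 10, 55, 100):
    print(step, round(linear_with_warmup(step), 3))  # 0.0, 0.5, 1.0, 0.5, 0.0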
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase :Optional[Any] = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def a ( lowerCamelCase__ ):
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase__ , PIL.Image.Image ):
A_ : int = [image]
A_ : int = [trans(img.convert("""RGB""" ) ) for img in image]
A_ : Dict = torch.stack(lowerCamelCase__ )
return image
class _lowerCAmelCase ( snake_case__ ):
def __init__(self , lowercase , lowercase ):
super().__init__()
# make sure scheduler can always be converted to DDIM
A_ : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_A , scheduler=_A )
def _a (self , lowercase ):
if strength < 0 or strength > 1:
raise ValueError(F'The value of strength should in [0.0, 1.0] but is {strength}' )
def _a (self , lowercase , lowercase , lowercase ):
# get the original timestep using init_timestep
A_ : Optional[int] = min(int(num_inference_steps * strength ) , _A )
A_ : str = max(num_inference_steps - init_timestep , 0 )
A_ : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}' )
A_ : Union[str, Any] = image.to(device=_A , dtype=_A )
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(_A )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
A_ : Dict = init_latents.shape
A_ : Optional[int] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
print("""add noise to latents at timestep""" , _A )
A_ : List[Any] = self.scheduler.add_noise(_A , _A , _A )
A_ : Optional[int] = init_latents
return latents
@torch.no_grad()
def __call__(self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ):
self.check_inputs(_A )
# 2. Preprocess image
A_ : int = preprocess(_A )
# 3. set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
A_ : Tuple = self.get_timesteps(_A , _A , self.device )
A_ : int = timesteps[:1].repeat(_A )
# 4. Prepare latent variables
A_ : Dict = self.prepare_latents(_A , _A , _A , self.unet.dtype , self.device , _A )
A_ : List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(_A ):
# 1. predict noise model_output
A_ : Optional[Any] = self.unet(_A , _A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A_ : List[Any] = self.scheduler.step(
_A , _A , _A , eta=_A , use_clipped_model_output=_A , generator=_A , ).prev_sample
A_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(_A )
if not return_dict:
return (image, latent_timestep.item())
        return ImagePipelineOutput(images=_A )
| 711 |
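The strength handling in the `get_timesteps`-style helper above is plain index arithmetic over the scheduler's timestep list. A numeric sketch with assumed settings:

num_inference_steps, strength = 50, 0.8  # hypothetical settings

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# Denoising runs over the last 40 of the 50 scheduled steps.
print(init_timestep, t_start)  # 40 10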
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
if model_name == "encodec_24khz" or "encodec_32khz":
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
| 686 | 0 |
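The weight-setting helper above locates a parameter by walking a dotted key with repeated `getattr` calls. A standalone sketch of that traversal over a hypothetical nested object:

class Leaf:  # hypothetical stand-ins for nested modules
    shape = (2, 3)

class Block:
    conv = Leaf()

class Model:
    encoder = Block()

def resolve(root, dotted):
    obj = root
    for attr in dotted.split("."):
        obj = getattr(obj, attr)
    return obj

print(resolve(Model(), "encoder.conv").shape)  # (2, 3)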
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :Tuple = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase :Dict = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase :Optional[Any] = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase :int = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
lowerCamelCase :Optional[Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
lowerCamelCase :Dict = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
lowerCamelCase :List[str] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase :List[str] = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase :Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Tuple = DPRContextEncoderTokenizer
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Tuple = DPRQuestionEncoderTokenizer
lowerCamelCase :Optional[int] = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase :Union[str, Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase :Optional[int] = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(__UpperCAmelCase )
class _lowerCAmelCase :
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ):
if titles is None and texts is None:
return super().__call__(
__a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , )
elif titles is None or texts is None:
A_ : Dict = titles if texts is None else texts
return super().__call__(
__a , __a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , )
A_ : Optional[int] = titles if not isinstance(__a , __a ) else [titles]
A_ : Dict = texts if not isinstance(__a , __a ) else [texts]
A_ : List[str] = len(__a )
A_ : Tuple = questions if not isinstance(__a , __a ) else [questions] * n_passages
assert len(__a ) == len(
            __a ), F'There should be as many titles as texts, but got {len(__a )} titles and {len(__a )} texts.'
A_ : Dict = super().__call__(__a , __a , padding=__a , truncation=__a )["""input_ids"""]
A_ : Tuple = super().__call__(__a , add_special_tokens=__a , padding=__a , truncation=__a )["""input_ids"""]
A_ : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__a , __a )
]
}
if return_attention_mask is not False:
A_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A_ : str = attention_mask
return self.pad(__a , padding=__a , max_length=__a , return_tensors=__a )
def _a (self , lowercase , lowercase , lowercase = 16 , lowercase = 64 , lowercase = 4 , ):
A_ : Any = reader_input["""input_ids"""]
A_, A_, A_ : Dict = reader_output[:3]
A_ : Dict = len(__a )
A_ : List[Any] = sorted(range(__a ) , reverse=__a , key=relevance_logits.__getitem__ )
A_ : Optional[Any] = []
for doc_id in sorted_docs:
A_ : Optional[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A_ : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A_ : Any = sequence_ids.index(self.pad_token_id )
else:
A_ : int = len(__a )
A_ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__a , top_spans=__a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__a , start_index=__a , end_index=__a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _a (self , lowercase , lowercase , lowercase , lowercase , ):
A_ : int = []
for start_index, start_score in enumerate(__a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A_ : Any = sorted(__a , key=lambda lowercase : x[1] , reverse=__a )
A_ : Dict = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
A_ : str = end_index - start_index + 1
assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = READER_PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = READER_PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : List[str] = ['input_ids', 'attention_mask']
    __SCREAMING_SNAKE_CASE : Any = DPRReaderTokenizer
| 712 |
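The reader's span selection scores every candidate `(start, end)` window, sorts by score, and skips spans that contain or are contained by an already-chosen span. A self-contained numeric sketch with made-up logits:

start_logits = [0.1, 2.0, 0.3, 1.5]
end_logits = [0.2, 0.1, 2.5, 0.4]
max_answer_length, top_spans = 3, 2

scores = [
    ((s, s + l), start_logits[s] + end_logits[s + l])
    for s in range(len(start_logits))
    for l in range(min(max_answer_length, len(end_logits) - s))
]
scores.sort(key=lambda x: x[1], reverse=True)

chosen = []
for (s, e), _ in scores:
    # Drop the candidate if it nests with any span already kept.
    if any(ps <= s <= e <= pe or s <= ps <= pe <= e for ps, pe in chosen):
        continue
    chosen.append((s, e))
    if len(chosen) == top_spans:
        break
print(chosen)  # [(1, 2), (3, 3)]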
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 0 |
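# Hedged example (added, not part of the original module): the configuration
# above ships in `transformers` as `BeitConfig`, so it can be exercised
# directly; the default checked below mirrors the __init__ signature.
from transformers import BeitConfig as _BeitConfig

_demo_beit = _BeitConfig(image_size=224 , patch_size=16 , use_relative_position_bias=True )
assert _demo_beit.num_hidden_layers == 12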
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_text_dual_encoder'''] = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_text_dual_encoder'''] = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_text_dual_encoder'''] = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCamelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 713 |
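# Hedged sketch of what the `_LazyModule` indirection above buys: a submodule is
# only imported when one of its exported names is first accessed, keeping the
# package import cheap. `_DemoLazyModule` is illustrative; the real
# implementation lives in `transformers.utils`.
import importlib

class _DemoLazyModule:
    def __init__(self , import_structure ):
        self._import_structure = import_structure

    def __getattr__(self , name ):
        for module_name, exported in self._import_structure.items():
            if name in exported:
                return getattr(importlib.import_module(module_name ) , name )
        raise AttributeError(name )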
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    '''simple docstring'''
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule( t ):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
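# Sanity check (hedged) of the schedule endpoints: at t = 1 the code above
# yields sigma = 1 and alpha = 0, and atan2(1, 0) / pi * 2 == 1; at t = 0 it
# yields 0, so the returned timesteps stay in [0, 1].
assert abs(math.atan2(1.0 , 0.0 ) / math.pi * 2 - 1.0 ) < 1e-9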
class Object(object ):
    pass
class DiffusionUncond(nn.Module ):
    def __init__(self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    '''simple docstring'''
    url = MODELS_MAP[model_name]["""url"""]
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming( name ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    '''simple docstring'''
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
raise ValueError(f'Attn error with {name}' )
def rename( input_string , max_depth=13 ):
    '''simple docstring'''
    string = input_string
    if string.split(""".""" )[0] == "timestep_embed":
        return string.replace("""timestep_embed""" , """time_proj""" )
    depth = 0
    if string.startswith("""net.3.""" ):
        depth += 1
        string = string[6:]
    elif string.startswith("""net.""" ):
        string = string[4:]
    while string.startswith("""main.7.""" ):
        depth += 1
        string = string[7:]
    if string.startswith("""main.""" ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = """mid_block"""
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else """down_blocks.0"""
    if not string_left.startswith(""".""" ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + """.""" + new_layer + """.""" + string_left
    else:
        new_string = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
    return new_string
def rename_orig_weights( state_dict ):
    '''simple docstring'''
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    '''simple docstring'''
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
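# Hedged shape check for the qkv split above: a stacked 1x1 conv weight of
# shape (3*C, C, 1) yields three (C, C) linear weights (here C = 4).
_qkv_demo = torch.randn(3 * 4 , 4 , 1 )
assert all(_qkv_demo[i * 4 : (i + 1) * 4, :, 0].shape == (4, 4) for i in range(3 ) )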
def main( args ):
    '''simple docstring'''
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    model_name = args.model_path.split("""/""" )[-1].split(""".""" )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]["""sample_rate"""]
    sample_size = MODELS_MAP[model_name]["""sample_size"""]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["""state_dict"""] )
    diffusion = orig_model.diffusion_ema.eval()
    diffusion_state_dict = diffusion.state_dict()
    renamed_state_dict = rename_orig_weights(diffusion_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("""kernel""" ) for k in list(diffusers_minus_renamed ) ), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(diffusion , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("""Diff sum""" , diff_sum )
    print("""Diff max""" , diff_max )
    assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
def minify( src_dir , dest_dir , n ):
    '''simple docstring'''
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new_lines = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open("""w""" ).write("""\n""".join(new_lines ) )
if __name__ == "__main__":
fire.Fire(minify) | 714 |
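# Assumed CLI usage via python-fire (positional argv map onto minify's params):
#   python minify.py <src_dir> <dest_dir> <n>   # keep the first n lines of each file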
'''simple docstring'''
from math import factorial
def binomial_distribution( successes , trials , prob ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be less than or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in the range 0 - 1""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
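# Worked example for the demo below:
# P(X = 2 | n = 4, p = 0.75) = C(4, 2) * 0.75**2 * 0.25**2
#                            = 6 * 0.5625 * 0.0625 = 0.2109375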
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__SCREAMING_SNAKE_CASE : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _a (self ):
A_ : List[str] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
A_ : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
A_ : int = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}] )
A_ : List[Any] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
] , )
A_ : List[Any] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
# Legacy behavior
A_ : int = text_classifier("""This is great !""" , return_all_scores=lowercase )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
A_ : Optional[int] = text_classifier("""This is great !""" , return_all_scores=lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}]] )
A_ : Tuple = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
] , )
A_ : Optional[int] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.5_04},
{"""label""": """LABEL_0""", """score""": 0.5_04},
] , )
@require_torch
def _a (self ):
import torch
A_ : List[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
A_ : Any = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
@require_tf
def _a (self ):
A_ : Tuple = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
A_ : Union[str, Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
@slow
@require_torch
def _a (self ):
A_ : Optional[int] = pipeline("""text-classification""" )
A_ : int = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
A_ : Any = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
A_ : str = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] )
@slow
@require_tf
def _a (self ):
A_ : Optional[int] = pipeline("""text-classification""" , framework="""tf""" )
A_ : List[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
A_ : Union[str, Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
A_ : Optional[Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Union[str, Any] = TextClassificationPipeline(model=lowercase , tokenizer=lowercase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _a (self , lowercase , lowercase ):
A_ : Optional[Any] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
A_ : str = "HuggingFace is in"
A_ : str = text_classifier(lowercase )
self.assertEqual(nested_simplify(lowercase ) , [{"""label""": ANY(lowercase ), """score""": ANY(lowercase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
A_ : Dict = ["HuggingFace is in ", "Paris is in France"]
A_ : Tuple = text_classifier(lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [{"""label""": ANY(lowercase ), """score""": ANY(lowercase )}, {"""label""": ANY(lowercase ), """score""": ANY(lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
A_ : Optional[int] = text_classifier(lowercase , top_k=lowercase )
A_ : Optional[int] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase ) , [[{"""label""": ANY(lowercase ), """score""": ANY(lowercase )}] * N, [{"""label""": ANY(lowercase ), """score""": ANY(lowercase )}] * N] , )
A_ : Union[str, Any] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
A_ : Dict = text_classifier(lowercase )
self.assertEqual(
nested_simplify(lowercase ) , {"""label""": ANY(lowercase ), """score""": ANY(lowercase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
A_ : Optional[int] = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(lowercase ):
text_classifier(lowercase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
A_ : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(lowercase ) , [{"""label""": ANY(lowercase ), """score""": ANY(lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) | 715 |
'''simple docstring'''
import re
def a ( dna ):
    '''simple docstring'''
    if len(re.findall("""[ATCG]""" , dna ) ) != len(dna ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
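# Worked example: the mapping is a pure complement (no reversal), so
# "ATCGATCG" -> "TAGCTAGC"; a strand containing anything outside "ATCG"
# raises ValueError.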
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'xlm-roberta-xl'
def __init__(self , lowercase=250880 , lowercase=2560 , lowercase=36 , lowercase=32 , lowercase=10240 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=514 , lowercase=1 , lowercase=0.02 , lowercase=1E-05 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
A_ : List[Any] = vocab_size
A_ : Tuple = hidden_size
A_ : int = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : List[Any] = hidden_act
A_ : Any = intermediate_size
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : Tuple = initializer_range
A_ : Optional[int] = layer_norm_eps
A_ : str = position_embedding_type
A_ : List[Any] = use_cache
A_ : Dict = classifier_dropout
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
def _a (self ):
if self.task == "multiple-choice":
A_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 716 |
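# Hedged note: the dynamic axes above let the exported ONNX graph accept any
# (batch, sequence) shape; the extra "choice" axis only appears for the
# multiple-choice task.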
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" ) | 686 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def _a (self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _a (self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _a (self ):
self.test_metrics.main()
@require_multi_gpu
def _a (self ):
print(F'Found {torch.cuda.device_count()} devices.' )
A_ : Tuple = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() ) | 717 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash( example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats( example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def check_uniques( example , uniques ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def is_autogenerated( example , scan_width=5 ):
    '''simple docstring'''
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test( example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
    nlines = example["""content"""].count("""\n""" )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example ):
    '''simple docstring'''
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example , minimum=4 ):
    '''simple docstring'''
    lines = example["""content"""].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example ):
    '''simple docstring'''
    input_ids = tokenizer(example["""content"""] , truncation=True )["""input_ids"""]
    ratio = len(example["""content"""] ) / len(input_ids )
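    # e.g. 1,000 characters tokenizing into 250 tokens gives ratio = 4.0;
    # unusually low ratios flag content that is expensive to tokenize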
return {"ratio": ratio}
def preprocess( example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
return results
def filter( example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path ):
    '''simple docstring'''
    with open(file_path , """rb""" ) as f_in:
        with gzip.open(str(file_path ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCamelCase :Optional[Any] = sys.version_info >= (3, 1_0)
def a ( lowerCamelCase__=None , lowerCamelCase__=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float
__SCREAMING_SNAKE_CASE : str
__SCREAMING_SNAKE_CASE : bool
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int = 42
__SCREAMING_SNAKE_CASE : str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : Optional[bool] = None
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'titi'
__SCREAMING_SNAKE_CASE : Tuple = 'toto'
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'titi'
__SCREAMING_SNAKE_CASE : Dict = 'toto'
__SCREAMING_SNAKE_CASE : List[Any] = 42
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def _a (self ):
A_ : int = BasicEnum(self.foo )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def _a (self ):
A_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[float] = field(default=__UpperCAmelCase , metadata={'help': 'help message'} )
__SCREAMING_SNAKE_CASE : Optional[str] = None
__SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
__SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
__SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
__SCREAMING_SNAKE_CASE : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
__SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : List[int] = field()
__SCREAMING_SNAKE_CASE : str = field()
__SCREAMING_SNAKE_CASE : BasicEnum = field()
def _a (self ):
A_ : Optional[int] = BasicEnum(self.required_enum )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : "BasicEnum" = field()
__SCREAMING_SNAKE_CASE : "Optional[bool]" = None
__SCREAMING_SNAKE_CASE : "str" = field(default='toto' , metadata={'help': 'help message'} )
__SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int | None = None
__SCREAMING_SNAKE_CASE : float | None = field(default=__UpperCAmelCase , metadata={'help': 'help message'} )
__SCREAMING_SNAKE_CASE : str | None = None
__SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
__SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class _lowerCAmelCase ( unittest.TestCase ):
    def argparsersEqual(self , a , b ):
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCAmelCase__ ) and yy.get("""choices""" , UpperCAmelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCAmelCase__ ) , yy["""type"""](UpperCAmelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : List[str] = HfArgumentParser(UpperCAmelCase__ )
A_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("""--bar""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("""--baz""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("""--flag""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs="""?""" )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : Dict = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
(A_ ) : List[str] = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ )
self.assertFalse(example.flag )
def _a (self ):
A_ : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
A_ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase__ , help="""help message""" )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase__ , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
A_ : int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase__ )
for dataclass_type in dataclass_types:
A_ : int = HfArgumentParser(UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : List[str] = parser.parse_args([] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
A_ : Tuple = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
A_ : Optional[Any] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
A_ : Union[str, Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
A_ : str = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
def _a (self ):
A_ : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : str = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
A_ : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
A_ : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
A_ : Optional[int] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a (self ):
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
A_ : List[Any] = HfArgumentParser(UpperCAmelCase__ )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : int = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
A_ : Dict = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
A_ : Tuple = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _a (self ):
A_ : str = HfArgumentParser(UpperCAmelCase__ )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase__ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase__ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase__ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _a (self ):
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
expected.add_argument("""--bar""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase__ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase__ )
A_ : List[str] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase__ )
for dataclass_type in dataclass_types:
A_ : Dict = HfArgumentParser(UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : List[str] = parser.parse_args([] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) )
A_ : Dict = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _a (self ):
A_ : Optional[int] = HfArgumentParser(UpperCAmelCase__ )
A_ : str = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("""--required_str""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase__ , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : List[str] = HfArgumentParser(UpperCAmelCase__ )
A_ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase__ , )
expected.add_argument("""--opt""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase__ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
A_ : Dict = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : str = parser.parse_dict(UpperCAmelCase__ )[0]
A_ : Tuple = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : Union[str, Any] = HfArgumentParser(UpperCAmelCase__ )
A_ : Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
def _a (self ):
A_ : Dict = HfArgumentParser(UpperCAmelCase__ )
A_ : Any = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : List[str] = os.path.join(UpperCAmelCase__ , """temp_json""" )
os.mkdir(UpperCAmelCase__ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
            A_ : Tuple = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
A_ : Dict = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : Dict = HfArgumentParser(UpperCAmelCase__ )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : List[str] = os.path.join(UpperCAmelCase__ , """temp_yaml""" )
os.mkdir(UpperCAmelCase__ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : Any = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
A_ : int = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _a (self ):
A_ : Any = HfArgumentParser(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ ) | 718 |
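# Hedged recap of the pattern exercised above: the dataclass itself is the
# argparse spec. `_DemoArgs` is ad hoc and not part of the test suite.
@dataclass
class _DemoArgs:
    foo: int = 42
    baz: str = "toto"

(_demo_args,) = HfArgumentParser(_DemoArgs ).parse_args_into_dataclasses(["""--foo""", """7"""] )
assert _demo_args.foo == 7 and _demo_args.baz == "toto"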
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'{script_name}.py'
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE : List[str] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**snake_case__ )
A_ : Any = size if size is not None else {"shortest_edge": 224}
A_ : str = get_size_dict(snake_case__ , default_to_square=snake_case__ )
A_ : Dict = crop_size if crop_size is not None else {"height": 256, "width": 256}
A_ : Optional[int] = get_size_dict(snake_case__ , param_name="""crop_size""" )
A_ : Optional[Any] = do_resize
A_ : List[str] = size
A_ : Optional[int] = resample
A_ : Optional[int] = do_rescale
A_ : List[Any] = rescale_factor
A_ : int = do_center_crop
A_ : str = crop_size
A_ : Any = do_flip_channel_order
def _a (self , lowercase , lowercase , lowercase = PIL.Image.BILINEAR , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : List[str] = get_resize_output_image_size(snake_case__ , size=size["""shortest_edge"""] , default_to_square=snake_case__ )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(snake_case__ , size=(size["""height"""], size["""width"""]) , data_format=snake_case__ , **snake_case__ )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _a (self , lowercase , lowercase = None ):
return flip_channel_order(snake_case__ , data_format=snake_case__ )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : List[Any] = resample if resample is not None else self.resample
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : List[str] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
A_ : Any = size if size is not None else self.size
A_ : Union[str, Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
A_ : str = crop_size if crop_size is not None else self.crop_size
A_ : Union[str, Any] = get_size_dict(snake_case__ , param_name="""crop_size""" )
A_ : Dict = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
A_ : str = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
A_ : Tuple = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
A_ : List[str] = [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
A_ : int = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
A_ : List[Any] = [self.flip_channel_order(image=snake_case__ ) for image in images]
A_ : Optional[Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
A_ : Any = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
def _a (self , lowercase , lowercase = None ):
A_ : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(snake_case__ ):
A_ : Any = target_sizes.numpy()
A_ : List[str] = []
for idx in range(len(snake_case__ ) ):
A_ : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=snake_case__ )
A_ : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case__ )
else:
A_ : int = logits.argmax(dim=1 )
A_ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 719 |
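# A minimal sketch of the semantic-segmentation post-processing pattern used
# above: upsample each image's logits to its target size with bilinear
# interpolation, then take the per-pixel argmax over the class dimension. The
# batch size, class count, and target sizes below are illustrative assumptions.
import torch

logits = torch.randn(2, 21, 32, 32)  # (batch, num_classes, height, width)
target_sizes = [(64, 64), (48, 80)]  # one (height, width) per image

semantic_maps = []
for idx in range(len(target_sizes)):
    resized = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
    )
    semantic_maps.append(resized[0].argmax(dim=0))  # (height, width) map of class ids

assert semantic_maps[0].shape == (64, 64) and semantic_maps[1].shape == (48, 80)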
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 0 |
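# An evaluation sketch for the k-NN classifier above: the helper re-implements
# the same majority vote with vectorized numpy distances, and the commented
# lines show how it could be scored on the held-out split. The names X_test and
# y_test are assumptions for the (renamed) outputs of train_test_split above.
from collections import Counter

import numpy as np

def knn_predict(train_x, train_y, point, k=5):
    dists = np.linalg.norm(train_x - np.array(point), axis=1)  # all train distances at once
    nearest = np.asarray(train_y)[np.argsort(dists)[:k]]  # labels of the k closest points
    return Counter(nearest).most_common(1)[0][0]

# accuracy = np.mean([knn_predict(X_train, y_train, p) == t for p, t in zip(X_test, y_test)])
# print(accuracy)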
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments | 720 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer does not support padding, which is necessary for zero-shot; attempting to use"""
                """ `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
                # The tokenizer may raise when asked to truncate to a length
                # the input never reaches. In that case we don't want to
                # truncate, and there is no cleaner way to catch that
                # exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 0 |
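# A usage sketch for the zero-shot pipeline implemented above, via the public
# transformers entry point; the checkpoint name is an illustrative default.
# With multi_label=False the "entailment" logits are softmaxed across all
# candidate labels, matching the else-branch of the postprocessing code.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput.",
    candidate_labels=["hardware", "cooking", "politics"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # best label first, scores sum to 1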
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase :str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = ["""YolosFeatureExtractor"""]
lowerCamelCase :List[Any] = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Union[str, Any] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 721 |
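# The _LazyModule assignment above defers the heavy torch/vision imports until
# an attribute is first accessed. A stripped-down sketch of the same idea (not
# the transformers implementation), using module-level __getattr__ from
# PEP 562; the attribute-to-module mapping is illustrative.
import importlib

_lazy_targets = {"sqrt": "math", "dataclass": "dataclasses"}

def __getattr__(name):
    if name in _lazy_targets:  # import the backing module only on first access
        return getattr(importlib.import_module(_lazy_targets[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")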
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 0 |
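# An instantiation sketch for the configuration classes above, using the
# upstream transformers spellings (YolosConfig / YolosOnnxConfig) rather than
# the renamed identifiers in the snippet; the properties inputs and
# atol_for_validation correspond to the obfuscated `_a` properties above.
from transformers.models.yolos import YolosConfig, YolosOnnxConfig

config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
onnx_config = YolosOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with dynamic batch/channel/height/width axes
print(onnx_config.atol_for_validation)  # 1e-04, matching the property above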
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'BlipImageProcessor'
__SCREAMING_SNAKE_CASE : List[str] = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowercase , lowercase ):
A_ : Optional[int] = False
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ : str = self.tokenizer
A_ : Optional[Any] = self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
return text_encoding
# add pixel_values
A_ : List[Any] = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
if text is not None:
A_ : Tuple = self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
else:
A_ : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase__ )
return encoding_image_processor
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def _a (self ):
A_ : Union[str, Any] = self.tokenizer.model_input_names
A_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 700 |
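# A usage sketch for the processor above: one __call__ covers text-only,
# image-only, and joint encoding. The checkpoint and image URL are illustrative.
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']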
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. This issue has also been examined through a theory called the power law, which states a correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 686 | 0 |
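# A worked example of the WER arithmetic the metric above delegates to jiwer:
# WER = (S + D + I) / (S + D + C). The toy pair below aligns with exactly one
# substitution ("reference" -> "prediction") and three hits, so WER = 1/4.
from jiwer import compute_measures

measures = compute_measures("this is the reference", "this is the prediction")
s, d, i = measures["substitutions"], measures["deletions"], measures["insertions"]
c = measures["hits"]
print((s + d + i) / (s + d + c))  # 0.25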
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCamelCase :Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Any = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
A_ : Dict = self.transformer_dir
shutil.copy(
os.path.join(_lowerCamelCase , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def _a (self ):
A_ : Any = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def _a (self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : List[str] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
A_ : Tuple = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
A_ : str = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
A_ : List[str] = black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
A_ : Dict = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_lowerCamelCase , """w""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowerCamelCase )
with open(_lowerCamelCase , """r""" ) as f:
self.assertTrue(f.read() , _lowerCamelCase )
def _a (self ):
A_ : Optional[Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _lowerCamelCase ) , )
# Copy consistency with a really long name
A_ : List[Any] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , _lowerCamelCase , _lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _lowerCamelCase , overwrite_result=re.sub("""Bert""" , """TestModel""" , _lowerCamelCase ) , )
def _a (self ):
A_ : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
A_ : Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
A_ : Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
A_ : List[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
A_, A_ : Any = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme["""format_model_list"""] )
self.assertFalse(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
A_, A_ : int = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme["""format_model_list"""] )
        # Check that the number of models matches the original README.md after conversion.
self.assertTrue(_lowerCamelCase )
A_ : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
A_ : int = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
A_ : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
A_, A_ : Union[str, Any] = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_lowerCamelCase , _lowerCamelCase ) | 701 |
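# The tests above drive black programmatically before diffing copied code; a
# minimal sketch of that API, with an illustrative snippet and line length:
import black

mode = black.Mode(line_length=119)
print(black.format_str("x = {  'a':1 }", mode=mode), end="")  # x = {"a": 1}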
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 686 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A_ : List[str] = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A_ : Optional[int] = f'{src_lang}-{tgt_lang}'
    A_ : List[Any] = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation.\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle inputs with repeated sub-phrases well, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nNote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
A_ : List[str] = os.path.join(lowerCamelCase_ , """README.md""" )
print(f'Generating {path}' )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCamelCase_ )
# make sure we are under the root of the project
lowerCamelCase :List[str] = Path(__file__).resolve().parent.parent.parent
lowerCamelCase :Union[str, Any] = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase :int = model_name.split('''-''')
lowerCamelCase :int = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 702 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(lowercase ) | 686 | 0 |
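# An instantiation sketch for one of the blocks exercised above, using the
# upstream diffusers spelling (unet_2d_blocks / DownBlock2D) rather than the
# digit-stripped names in the snippet; the module path follows the layout the
# snippet's wildcard import targets, and the shapes are illustrative.
import torch
from diffusers.models.unet_2d_blocks import DownBlock2D

block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
sample = torch.randn(1, 32, 64, 64)
temb = torch.randn(1, 128)
hidden, res_states = block(sample, temb)
print(hidden.shape)  # torch.Size([1, 32, 32, 32]) after the default downsample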
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :str = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
__SCREAMING_SNAKE_CASE : Tuple = 'mobilenet_v1'
def __init__(self , lowercase=3 , lowercase=224 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.9_99 , lowercase=0.02 , lowercase=0.0_01 , **lowercase , ):
super().__init__(**_lowercase )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
A_ : Union[str, Any] = num_channels
A_ : Union[str, Any] = image_size
A_ : str = depth_multiplier
A_ : int = min_depth
A_ : Any = hidden_act
A_ : Union[str, Any] = tf_padding
A_ : Tuple = classifier_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : str = layer_norm_eps
class _lowerCAmelCase ( UpperCAmelCase_ ):
__SCREAMING_SNAKE_CASE : Tuple = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _a (self ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _a (self ):
return 1E-4 | 703 |
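# An instantiation sketch for the config above; depth_multiplier scales every
# layer's channel count, and the guard in __init__ rejects values <= 0. The
# class name follows the upstream transformers spelling.
from transformers import MobileNetV1Config

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
print(config.depth_multiplier, config.image_size)  # 0.75 192
try:
    MobileNetV1Config(depth_multiplier=0.0)
except ValueError as err:
    print(err)  # depth_multiplier must be greater than zero.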
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
    A_ : Any = [[]]  # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                # now, push that combination to table[i + len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 | 0 |
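# For contrast with the bottom-up table above, a top-down sketch that only
# counts constructions instead of materializing them; memoizing on the suffix
# keeps repeated subproblems from exploding. The function name is illustrative.
from functools import lru_cache

def count_construct(target, word_bank):
    @lru_cache(maxsize=None)
    def ways(suffix):
        if suffix == "":
            return 1  # one way to build the empty string: use no words
        return sum(ways(suffix[len(word):]) for word in word_bank if suffix.startswith(word))
    return ways(target)

print(count_construct("purple", ("purp", "p", "ur", "le", "purpl")))  # 2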
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase :List[Any] = None
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :Union[str, Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowerCamelCase :Dict = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
lowerCamelCase :Union[str, Any] = '''▁'''
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
def __init__(self , lowercase=None , lowercase=None , lowercase=True , lowercase=True , lowercase=False , lowercase="[CLS]" , lowercase="[SEP]" , lowercase="<unk>" , lowercase="[SEP]" , lowercase="<pad>" , lowercase="[CLS]" , lowercase="[MASK]" , **lowercase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
A_ : Union[str, Any] = (
AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_ )
if isinstance(lowercase_ , lowercase_ )
else mask_token
)
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
A_ : Optional[Any] = do_lower_case
A_ : Tuple = remove_space
A_ : str = keep_accents
A_ : int = vocab_file
A_ : Dict = False if not self.vocab_file else True
def _a (self , lowercase , lowercase = None ):
A_ : Optional[int] = [self.sep_token_id]
A_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a (self , lowercase , lowercase = None ):
A_ : List[Any] = [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a (self , lowercase , lowercase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Any = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
        return (out_vocab_file,)
'''simple docstring'''
def is_balanced ( s ):
    '''simple docstring'''
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main ( ):
    '''simple docstring'''
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s ):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
    main()
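# The same stack-based matcher in a compact form; a minimal sketch with
# conventional names, not the exact function above:
def is_balanced_sketch(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack: list[str] = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs and (not stack or stack.pop() != pairs[ch]):
            return False
    return not stack  # balanced iff nothing is left open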
'''simple docstring'''
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock ( *msgs ):
    '''simple docstring'''
    with open(__file__ , """r""" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
    raise
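# This diagnostic is meant to be launched once per GPU so that LOCAL_RANK is
# set for each process, e.g. with torchrun on a single node with 2 GPUs (the
# script filename below is an assumption for illustration):
#
#   torchrun --nproc_per_node=2 torch_distributed_gpu_test.py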
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( AbstractDatasetReader ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Optional[int] = field
A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : Optional[Any] = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : int = None
A_ : Union[str, Any] = None
A_ : int = None
A_ : List[str] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : str = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
def _a (self ):
A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : List[str] = args
A_ : List[str] = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Any = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
        return written
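# A minimal round-trip sketch through the public `datasets` API that wraps the
# reader/writer classes above (the example data and file name are illustrative):
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("out.jsonl", lines=True)            # exercises the JSON writer path
ds_roundtrip = Dataset.from_json("out.jsonl")  # exercises the JSON reader path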
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
if _re_test_backend.search(_lowerCamelCase ) is None:
return None
A_ : str = [b[0] for b in _re_backend.findall(_lowerCamelCase )]
backends.sort()
return "_and_".join(_lowerCamelCase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A_ : List[Any] = f.readlines()
A_ : Any = 0
while line_index < len(_lowerCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
A_ : Tuple = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
A_ : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_lowerCamelCase ):
A_ : List[Any] = _re_one_line_import_struct.search(_lowerCamelCase ).groups()[0]
A_ : int = re.findall("""\[([^\]]+)\]""" , _lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
A_ : Any = _re_import_struct_key_value.search(_lowerCamelCase )
if single_line_import_search is not None:
A_ : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
A_ : int = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A_ : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A_ : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A_ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
A_ : Any = lines[line_index]
if _re_import_struct_add_one.search(_lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_lowerCamelCase ) is not None:
A_ : Tuple = _re_import_struct_add_many.search(_lowerCamelCase ).groups()[0].split(""", """ )
A_ : Tuple = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif _re_between_brackets.search(_lowerCamelCase ) is not None:
A_ : int = _re_between_brackets.search(_lowerCamelCase ).groups()[0].split(""", """ )
A_ : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif _re_quote_object.search(_lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(_lowerCamelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
A_ : List[str] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A_ : str = []
while (
line_index < len(_lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
A_ : Union[str, Any] = lines[line_index]
A_ : List[str] = _re_import.search(_lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
A_ : List[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
A_ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A_ : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A_ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
A_ : Optional[Any] = lines[line_index]
A_ : Dict = _re_import.search(_lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
A_ : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def find_duplicates(lowerCamelCase__ ):
return [k for k, v in collections.Counter(_lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A_ : Union[str, Any] = []
for key in import_dict_objects.keys():
A_ : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
A_ : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A_ : Optional[int] = "base imports" if key == "none" else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def a ( ):
'''simple docstring'''
A_ : int = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
A_ : Optional[int] = os.path.join(_lowerCamelCase , """__init__.py""" )
A_ : Optional[int] = parse_init(_lowerCamelCase )
if objects is not None:
A_ : Tuple = analyze_results(*_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
A_ : Dict = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("""\n""".join(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
raise ValueError("""\n\n""".join(_lowerCamelCase ) )
def a ( ):
'''simple docstring'''
A_ : Optional[int] = []
for path, directories, files in os.walk(_lowerCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(_lowerCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_lowerCamelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
A_ : Optional[int] = str((Path(_lowerCamelCase ) / folder).relative_to(_lowerCamelCase ) )
A_ : str = short_path.replace(os.path.sep , """.""" )
submodules.append(_lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
A_ : Optional[int] = str((Path(_lowerCamelCase ) / fname).relative_to(_lowerCamelCase ) )
A_ : str = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(_lowerCamelCase )
return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def a ( ):
'''simple docstring'''
A_ : List[Any] = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(_lowerCamelCase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A_ : Optional[int] = spec.loader.load_module()
A_ : Tuple = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_lowerCamelCase ) > 0:
A_ : str = "\n".join(f'- {module}' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f'{list_of_modules}\n'
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
    check_submodules()
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase :Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _lowerCAmelCase ( ChunkPipeline ):
def __init__(self , **lowercase ):
super().__init__(**__UpperCamelCase )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(__UpperCamelCase )
def __call__(self , lowercase , lowercase = None , **lowercase , ):
if "text_queries" in kwargs:
A_ : Any = kwargs.pop("""text_queries""" )
if isinstance(__UpperCamelCase , (str, Image.Image) ):
A_ : Dict = {"""image""": image, """candidate_labels""": candidate_labels}
else:
A_ : List[Any] = image
A_ : str = super().__call__(__UpperCamelCase , **__UpperCamelCase )
return results
def _a (self , **lowercase ):
A_ : Optional[int] = {}
if "threshold" in kwargs:
A_ : List[str] = kwargs["""threshold"""]
if "top_k" in kwargs:
A_ : Optional[int] = kwargs["""top_k"""]
return {}, {}, postprocess_params
def _a (self , lowercase ):
A_ : List[str] = load_image(inputs["""image"""] )
A_ : List[str] = inputs["""candidate_labels"""]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A_ : int = candidate_labels.split(""",""" )
A_ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(__UpperCamelCase ):
A_ : Union[str, Any] = self.tokenizer(__UpperCamelCase , return_tensors=self.framework )
A_ : Any = self.image_processor(__UpperCamelCase , return_tensors=self.framework )
yield {
"is_last": i == len(__UpperCamelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _a (self , lowercase ):
A_ : List[Any] = model_inputs.pop("""target_size""" )
A_ : Dict = model_inputs.pop("""candidate_label""" )
A_ : Union[str, Any] = model_inputs.pop("""is_last""" )
A_ : Union[str, Any] = self.model(**__UpperCamelCase )
A_ : List[Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def _a (self , lowercase , lowercase=0.1 , lowercase=None ):
A_ : Optional[Any] = []
for model_output in model_outputs:
A_ : Tuple = model_output["""candidate_label"""]
A_ : Union[str, Any] = BaseModelOutput(__UpperCamelCase )
A_ : Optional[int] = self.image_processor.post_process_object_detection(
outputs=__UpperCamelCase , threshold=__UpperCamelCase , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
A_ : List[Any] = outputs["""scores"""][index].item()
A_ : str = self._get_bounding_box(outputs["""boxes"""][index][0] )
A_ : Any = {"""score""": score, """label""": label, """box""": box}
results.append(__UpperCamelCase )
A_ : Any = sorted(__UpperCamelCase , key=lambda lowercase : x["score"] , reverse=__UpperCamelCase )
if top_k:
A_ : str = results[:top_k]
return results
def _a (self , lowercase ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
A_, A_, A_, A_ : Tuple = box.int().tolist()
A_ : str = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
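# A minimal usage sketch of this pipeline through the public `pipeline` API
# (the model id, image URL and labels are illustrative):
from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# each prediction looks like {"score": ..., "label": ..., "box": {"xmin": ..., ...}}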
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''FlavaImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : Dict = kwargs.pop("""feature_extractor""" )
A_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
A_ : Optional[Any] = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = False , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A_ : Optional[int] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
if images is not None:
A_ : Any = self.image_processor(
lowercase , return_image_mask=lowercase , return_codebook_pixels=lowercase , return_tensors=lowercase , **lowercase , )
if text is not None and images is not None:
encoding.update(lowercase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : int = self.tokenizer.model_input_names
A_ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
        return self.image_processor
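# A minimal usage sketch for this processor (the checkpoint id and image file
# are illustrative assumptions):
from PIL import Image
from transformers import FlavaProcessor

flava_processor = FlavaProcessor.from_pretrained("facebook/flava-full")
inputs = flava_processor(
    text=["a photo of a cat"],
    images=Image.open("cat.png"),
    return_tensors="pt",
    padding=True,
)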
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase :Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present ( transformers_path ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
'''simple docstring'''
class Graph :
    def __init__(self ):
        self.vertex = {}
    def print_graph(self ):
        print(self.vertex )
        for i in self.vertex:
            print(i , """ -> """ , """ -> """.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge(self , from_vertex , to_vertex ):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self ):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive(self , start_vertex , visited ):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=""" """ )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
    # 0 1 2 3
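# An equivalent iterative depth-first traversal with an explicit stack; a
# minimal sketch assuming the same adjacency-dict representation as Graph:
def dfs_iterative(vertex: dict[int, list[int]], start: int) -> list[int]:
    visited: set[int] = set()
    order: list[int] = []
    stack = [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so the lowest-numbered neighbour pops first
        stack.extend(reversed(vertex.get(node, [])))
    return order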
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate ( days , absent , late ):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution ( days = 30 ):
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
    print(solution())
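# The same recurrence with functools.lru_cache standing in for the manual
# cache dict; a minimal sketch:
from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

# e.g. prize_strings(4) == 43, the 4-day example from Project Euler 191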
'''simple docstring'''
def molarity_to_normality ( nfactor , moles , volume ):
    '''simple docstring'''
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure ( volume , moles , temperature ):
    '''simple docstring'''
    return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def moles_to_volume ( pressure , moles , temperature ):
    '''simple docstring'''
    return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature ( pressure , moles , volume ):
    '''simple docstring'''
    return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
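# These helpers are rearrangements of the ideal gas law PV = nRT with
# R ~= 0.0821 L*atm/(mol*K), plus the normality relation N = M * n_factor:
#
#   molarity_to_normality:              N = (moles / volume) * nfactor
#   moles_to_pressure:                  P = n * R * T / V
#   moles_to_volume:                    V = n * R * T / P
#   pressure_and_volume_to_temperature: T = P * V / (n * R)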
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class SchedulerType ( Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : Union[str, Any] = rule_str.split(""":""" )
A_ : Union[str, Any] = int(lowerCamelCase__ )
A_ : List[Any] = float(lowerCamelCase__ )
A_ : Union[str, Any] = value
A_ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ):
'''simple docstring'''
A_ : Optional[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : str = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ):
'''simple docstring'''
A_ : Optional[Any] = SchedulerType(lowerCamelCase__ )
A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
        lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
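# For reference, the warmup + cosine decay implemented above evaluates, for
# step t with W = num_warmup_steps and T = num_training_steps:
#
#   lr(t) = lr_init * t / max(1, W)                                      t < W
#   lr(t) = lr_init * max(0, 0.5 * (1 + cos(pi * 2 * num_cycles * p)))   t >= W
#
# where p = (t - W) / max(1, T - W); num_cycles defaults to 0.5, giving a
# single smooth decay from lr_init to 0 over the training run.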
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 2_5_6
class _lowerCAmelCase ( DiffusionPipeline ):
    _optional_components = ['''melgan''']
def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , ):
super().__init__()
# From MELGAN
A_ : Tuple = math.log(1E-5 ) # Matches MelGAN training.
A_ : List[str] = 4.0 # Largest value for most examples
A_ : List[Any] = 128
self.register_modules(
notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , )
def _a (self , lowercase , lowercase=(-1.0, 1.0) , lowercase=False ):
A_, A_ : Union[str, Any] = output_range
if clip:
A_ : List[Any] = torch.clip(UpperCamelCase__ , self.min_value , self.max_value )
# Scale to [0, 1].
A_ : Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _a (self , lowercase , lowercase=(-1.0, 1.0) , lowercase=False ):
A_, A_ : Tuple = input_range
A_ : Optional[int] = torch.clip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if clip else outputs
# Scale to [0, 1].
A_ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[int] = input_tokens > 0
A_, A_ : Dict = self.notes_encoder(
encoder_input_tokens=UpperCamelCase__ , encoder_inputs_mask=UpperCamelCase__ )
A_, A_ : str = self.continuous_encoder(
encoder_inputs=UpperCamelCase__ , encoder_inputs_mask=UpperCamelCase__ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[int] = noise_time
if not torch.is_tensor(UpperCamelCase__ ):
A_ : str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0:
A_ : Union[str, Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
A_ : int = self.decoder(
encodings_and_masks=UpperCamelCase__ , decoder_input_tokens=UpperCamelCase__ , decoder_noise_time=UpperCamelCase__ )
return logits
@torch.no_grad()
def __call__(self , lowercase , lowercase = None , lowercase = 100 , lowercase = True , lowercase = "numpy" , lowercase = None , lowercase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase__ )}.' )
A_ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
A_ : Optional[Any] = np.zeros([1, 0, self.n_dims] , np.floataa )
A_ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device )
for i, encoder_input_tokens in enumerate(UpperCamelCase__ ):
if i == 0:
A_ : Tuple = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
A_ : Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
A_ : Optional[int] = ones
A_ : Tuple = self.scale_features(
UpperCamelCase__ , output_range=[-1.0, 1.0] , clip=UpperCamelCase__ )
A_ : Optional[int] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase__ , continuous_mask=UpperCamelCase__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
A_ : Tuple = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCamelCase__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCamelCase__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
A_ : List[str] = self.decode(
encodings_and_masks=UpperCamelCase__ , input_tokens=UpperCamelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
A_ : Any = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
A_ : Optional[int] = self.scale_to_features(UpperCamelCase__ , input_range=[-1.0, 1.0] )
A_ : Tuple = mel[:1]
A_ : str = mel.cpu().float().numpy()
A_ : List[str] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase__ , UpperCamelCase__ )
logger.info("""Generated segment""" , UpperCamelCase__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
A_ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
A_ : Optional[int] = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=UpperCamelCase__ )
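# A minimal end-to-end sketch of this pipeline via diffusers (the checkpoint
# id and MIDI file name are illustrative):
from diffusers import MidiProcessor, SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
midi_processor = MidiProcessor()
output = pipe(midi_processor("beethoven_hammerklavier_2.mid"))
audio = output.audios[0]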
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
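    # Ignore patterns support two wildcard forms: "X.*" (prefix match) and "A.*.B" (prefix-and-suffix match).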
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
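            # Route the tensor to the matching parameter slot: weight-norm params, LSTM gates, batch-norm stats, or plain weight/bias.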
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
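    # Apply checkpoint-specific config overrides for the released 32 kHz and 48 kHz variants.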
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 0 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
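# Formats a duration in seconds as h:mm:ss, or mm:ss when under an hour.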
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = int(__lowerCAmelCase )
A_, A_, A_ : Optional[Any] = t // 36_00, (t // 60) % 60, t % 60
return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}'
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=3_00 ):
'''simple docstring'''
return f'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f' <th>{i}</th>\n'
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
A_ : str = f'{elt:.6f}' if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else str(__lowerCAmelCase )
html_code += f' <td>{elt}</td>\n'
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : List[Any] = 5
__SCREAMING_SNAKE_CASE : Dict = 0.2
def __init__(self , lowercase , lowercase = None , lowercase = True , lowercase = None , lowercase = 300 , ):
A_ : Any = total
A_ : Optional[Any] = """""" if prefix is None else prefix
A_ : List[Any] = leave
A_ : List[str] = parent
A_ : List[Any] = width
A_ : Optional[Any] = None
A_ : List[Any] = None
A_ : int = None
def _a (self , lowercase , lowercase = False , lowercase = None ):
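        # Records the new value/comment, then redraws only when forced, during warmup, or once `wait_for` steps have passed.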
A_ : Optional[Any] = value
if comment is not None:
A_ : Optional[Any] = comment
if self.last_value is None:
A_ : Any = time.time()
A_ : int = value
A_ : Union[str, Any] = None
A_ : Union[str, Any] = self.warmup
A_ : Any = 1
self.update_bar(lowerCAmelCase_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
A_ : Union[str, Any] = time.time()
A_ : List[str] = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
A_ : Optional[Any] = self.elapsed_time / (value - self.start_value)
else:
A_ : List[Any] = None
if value >= self.total:
A_ : Tuple = self.total
A_ : int = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
A_ : List[Any] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase_ )
A_ : Tuple = value
A_ : int = current_time
if self.average_time_per_item is None:
A_ : Union[str, Any] = 1
else:
A_ : Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def _a (self , lowercase , lowercase=None ):
A_ : Any = """ """ * (len(str(self.total ) ) - len(str(lowerCAmelCase_ ) )) + str(lowerCAmelCase_ )
if self.elapsed_time is None:
A_ : List[str] = F'[{spaced_value}/{self.total} : < :'
elif self.predicted_remaining is None:
A_ : Any = F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'
else:
A_ : str = (
F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'
F' {format_time(self.predicted_remaining )}'
)
            self.label += F', {1/self.average_time_per_item:.2f} it/s'
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F', {self.comment}]'
self.display()
def _a (self ):
A_ : List[str] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
A_ : Tuple = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def _a (self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowerCAmelCase_ )
A_ : List[Any] = None if column_names is None else [column_names]
A_ : Any = None
def _a (self ):
A_ : Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
A_ : Any = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def _a (self , lowercase ):
if self.inner_table is None:
A_ : str = [list(values.keys() ), list(values.values() )]
else:
A_ : List[str] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase_ )
A_ : List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def _a (self , lowercase , lowercase=None , lowercase=300 ):
A_ : List[Any] = NotebookProgressBar(lowerCAmelCase_ , prefix=lowerCAmelCase_ , parent=self , width=lowerCAmelCase_ )
return self.child_bar
def _a (self ):
A_ : int = None
self.display()
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self ):
A_ : List[Any] = None
A_ : Tuple = None
A_ : List[str] = False
def _a (self , lowercase , lowercase , lowercase , **lowercase ):
A_ : Union[str, Any] = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
A_ : str = 0
A_ : str = 0
A_ : List[Any] = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""" )
A_ : Union[str, Any] = NotebookTrainingTracker(state.max_steps , lowerCAmelCase_ )
def _a (self , lowercase , lowercase , lowercase , **lowercase ):
A_ : Optional[int] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'{state.epoch:.2f}'
self.training_tracker.update(
state.global_step + 1 , comment=F'Epoch {epoch}/{state.num_train_epochs}' , force_update=self._force_next_update , )
A_ : str = False
def _a (self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
if not has_length(lowerCAmelCase_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
A_ : List[Any] = self.training_tracker.add_child(len(lowerCAmelCase_ ) )
else:
A_ : Union[str, Any] = NotebookProgressBar(len(lowerCAmelCase_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _a (self , lowercase , lowercase , lowercase , **lowercase ):
if self.prediction_bar is not None:
self.prediction_bar.close()
A_ : int = None
def _a (self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
A_ : List[Any] = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step since we're not in an epoch eval strategy
A_ : List[str] = state.global_step
self.training_tracker.write_line(lowerCAmelCase_ )
def _a (self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
if self.training_tracker is not None:
A_ : Any = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
A_ : Optional[Any] = log["""loss"""]
break
if self.first_column == "Epoch":
A_ : List[str] = int(state.epoch )
else:
A_ : Tuple = state.global_step
A_ : Tuple = """eval"""
for k in metrics:
if k.endswith("""_loss""" ):
A_ : Dict = re.sub(R"""\_loss$""" , """""" , lowerCAmelCase_ )
A_ : Optional[Any] = metrics.pop("""total_flos""" , lowerCAmelCase_ )
A_ : List[Any] = metrics.pop("""epoch""" , lowerCAmelCase_ )
A_ : Union[str, Any] = metrics.pop(F'{metric_key_prefix}_runtime' , lowerCAmelCase_ )
A_ : Optional[Any] = metrics.pop(F'{metric_key_prefix}_samples_per_second' , lowerCAmelCase_ )
A_ : Optional[Any] = metrics.pop(F'{metric_key_prefix}_steps_per_second' , lowerCAmelCase_ )
A_ : Dict = metrics.pop(F'{metric_key_prefix}_jit_compilation_time' , lowerCAmelCase_ )
for k, v in metrics.items():
if k == F'{metric_key_prefix}_loss':
A_ : Tuple = v
else:
A_ : int = k.split("""_""" )
A_ : Dict = """ """.join([part.capitalize() for part in splits[1:]] )
A_ : Union[str, Any] = v
self.training_tracker.write_line(lowerCAmelCase_ )
self.training_tracker.remove_child()
A_ : Optional[Any] = None
# Evaluation takes a long time so we should force the next update.
A_ : Union[str, Any] = True
def _a (self , lowercase , lowercase , lowercase , **lowercase ):
self.training_tracker.update(
state.global_step , comment=F'Epoch {int(state.epoch )}/{state.num_train_epochs}' , force_update=lowerCAmelCase_ )
A_ : Tuple = None | 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
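# ONNX export spec: declares the dynamic axes of `pixel_values` and the tolerance used to validate the exported graph.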
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :Optional[Any] = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
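# The torch-dependent model classes are registered only when torch is importable.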
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Union[str, Any] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 713 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
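# alpha_sigma_to_t maps a noise pair back to a timestep: t = atan2(sigma, alpha) * 2 / pi.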
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
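# The tables above rename submodules of the original audio-diffusion checkpoint to the diffusers UNet layout.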
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
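    # One target name means a plain projection (squeeze the 1x1 conv kernel to a Linear weight);
    # several names mean a fused qkv matrix that is split into three equal chunks.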
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
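    # Sanity-check the port: sample with the original iPLMS sampler and the diffusers pipeline from the same seed and compare.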
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = botoa.client("""iam""" )
A_ : str = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
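    # Create the SageMaker execution role with the trust policy above, then attach its permission policy.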
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=UpperCAmelCase__ , AssumeRolePolicyDocument=json.dumps(UpperCAmelCase__ , indent=2 ) )
A_ : str = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=UpperCAmelCase__ , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(UpperCAmelCase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'role {role_name} already exists. Using existing one' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = botoa.client("""iam""" )
return iam_client.get_role(RoleName=UpperCAmelCase__ )["Role"]["Arn"]
def a ( ):
'''simple docstring'''
A_ : Dict = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , UpperCAmelCase__ , )
A_ : Tuple = None
if credentials_configuration == 0:
A_ : Union[str, Any] = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
A_ : str = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
A_ : Tuple = _ask_field("""AWS Access Key ID: """ )
A_ : Dict = aws_access_key_id
A_ : int = _ask_field("""AWS Secret Access Key: """ )
A_ : Any = aws_secret_access_key
A_ : Optional[Any] = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
A_ : Any = aws_region
A_ : str = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , UpperCAmelCase__ , )
if role_management == 0:
A_ : Union[str, Any] = _ask_field("""Enter your IAM role name: """ )
else:
A_ : List[Any] = """accelerate_sagemaker_execution_role"""
print(f'Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials' )
_create_iam_role_for_sagemaker(UpperCAmelCase__ )
A_ : Optional[Any] = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
A_ : Union[str, Any] = None
if is_custom_docker_image:
A_ : Dict = _ask_field("""Enter your Docker image: """ , lambda lowerCamelCase__ : str(UpperCAmelCase__ ).lower() )
A_ : int = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
A_ : List[str] = None
if is_sagemaker_inputs_enabled:
A_ : List[str] = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda lowerCamelCase__ : str(UpperCAmelCase__ ).lower() , )
A_ : Optional[Any] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
A_ : int = None
if is_sagemaker_metrics_enabled:
A_ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda lowerCamelCase__ : str(UpperCAmelCase__ ).lower() , )
A_ : Any = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
A_ : List[str] = {}
A_ : int = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
A_ : Tuple = """dynamo_"""
A_ : List[str] = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
A_ : Union[str, Any] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
A_ : Optional[Any] = _ask_options(
"""Which mode do you want to use?""" , UpperCAmelCase__ , lambda lowerCamelCase__ : TORCH_DYNAMO_MODES[int(UpperCAmelCase__ )] , default="""default""" , )
A_ : List[Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
A_ : Any = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCAmelCase__ , error_message="""Please enter yes or no.""" , )
A_ : List[str] = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
A_ : Union[str, Any] = _ask_options(
UpperCAmelCase__ , UpperCAmelCase__ , lambda lowerCamelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCAmelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
A_ : List[Any] = _ask_field(UpperCAmelCase__ , lambda lowerCamelCase__ : str(UpperCAmelCase__ ).lower() , default="""ml.p3.2xlarge""" )
A_ : List[str] = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
A_ : List[str] = _ask_field(
"""How many machines do you want use? [1]: """ , UpperCAmelCase__ , default=1 , )
A_ : Optional[Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=UpperCAmelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCAmelCase__ , use_cpu=UpperCAmelCase__ , dynamo_config=UpperCAmelCase__ , eca_instance_type=UpperCAmelCase__ , profile=UpperCAmelCase__ , region=UpperCAmelCase__ , iam_role_name=UpperCAmelCase__ , mixed_precision=UpperCAmelCase__ , num_machines=UpperCAmelCase__ , sagemaker_inputs_file=UpperCAmelCase__ , sagemaker_metrics_file=UpperCAmelCase__ , ) | 714 |
'''simple docstring'''
from math import factorial
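# P(X = k) = C(n, k) * p^k * (1 - p)^(n - k) for X ~ Binomial(n, p).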
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCamelCase :int = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
requires_backends(self , ["""bs4"""] )
super().__init__(**__UpperCamelCase )
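    # Computes the XPath tag names and 1-based sibling subscripts of a node by walking up to the root.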
def _a (self , lowercase ):
A_ : str = []
A_ : Optional[int] = []
A_ : Optional[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
A_ : Tuple = parent.find_all(child.name , recursive=__UpperCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__UpperCamelCase ) else next(i for i, s in enumerate(__UpperCamelCase , 1 ) if s is child ) )
A_ : Optional[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _a (self , lowercase ):
A_ : List[Any] = BeautifulSoup(__UpperCamelCase , """html.parser""" )
A_ : List[str] = []
A_ : Union[str, Any] = []
A_ : List[Any] = []
for element in html_code.descendants:
if type(__UpperCamelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
A_ : int = html.unescape(__UpperCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__UpperCamelCase )
A_, A_ : Any = self.xpath_soup(__UpperCamelCase )
stringaxtag_seq.append(__UpperCamelCase )
stringaxsubs_seq.append(__UpperCamelCase )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
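    # Joins tag names and subscripts into an XPath string such as /html/body/div[2]/p.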
def _a (self , lowercase , lowercase ):
A_ : List[str] = """"""
for tagname, subs in zip(__UpperCamelCase , __UpperCamelCase ):
xpath += F'/{tagname}'
if subs != 0:
xpath += F'[{subs}]'
return xpath
def __call__(self , lowercase ):
A_ : Optional[int] = False
# Check that strings has a valid type
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A_ : Optional[int] = True
elif isinstance(__UpperCamelCase , (list, tuple) ):
if len(__UpperCamelCase ) == 0 or isinstance(html_strings[0] , __UpperCamelCase ):
A_ : List[Any] = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F'but is of type {type(__UpperCamelCase )}.' )
A_ : Union[str, Any] = bool(isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __UpperCamelCase )) )
if not is_batched:
A_ : Union[str, Any] = [html_strings]
# Get nodes + xpaths
A_ : List[str] = []
A_ : str = []
for html_string in html_strings:
A_, A_, A_ : Optional[Any] = self.get_three_from_single(__UpperCamelCase )
nodes.append(__UpperCamelCase )
A_ : Any = []
for node, tag_list, sub_list in zip(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ : List[Any] = self.construct_xpath(__UpperCamelCase , __UpperCamelCase )
xpath_strings.append(__UpperCamelCase )
xpaths.append(__UpperCamelCase )
# return as Dict
A_ : int = {"""nodes""": nodes, """xpaths""": xpaths}
A_ : Tuple = BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
return encoded_inputs | 715 |
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
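    # A valid strand may contain only the bases A, T, C and G.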
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase :Dict = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = AlbertTokenizer
__SCREAMING_SNAKE_CASE : Tuple = AlbertTokenizerFast
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
def _a (self ):
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Optional[int] = AlbertTokenizer(lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self , lowercase ):
A_ : Dict = """this is a test"""
A_ : Optional[int] = """this is a test"""
return input_text, output_text
def _a (self ):
A_ : Optional[int] = """<pad>"""
A_ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(lowercase ) , 30000 )
def _a (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _a (self ):
if not self.test_rust_tokenizer:
return
A_ : Tuple = self.get_tokenizer()
A_ : Optional[int] = self.get_rust_tokenizer()
A_ : Tuple = """I was born in 92000, and this is falsé."""
A_ : Tuple = tokenizer.tokenize(lowercase )
A_ : str = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Tuple = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : Optional[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Optional[Any] = self.get_rust_tokenizer()
A_ : Optional[int] = tokenizer.encode(lowercase )
A_ : List[Any] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Tuple = AlbertTokenizer(lowercase , keep_accents=lowercase )
A_ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [48, 25, 21, 1289] )
A_ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
A_ : Any = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
A_ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _a (self ):
A_ : int = AlbertTokenizer(lowercase )
A_ : Optional[int] = tokenizer.encode("""sequence builders""" )
A_ : Optional[int] = tokenizer.encode("""multi-sequence build""" )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _a (self ):
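        # fmt: off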
A_ : int = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , ) | 716 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
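# Each OfflineSimulationMode simulates a different failure: connection timeouts, connection errors, or HF_DATASETS_OFFLINE=1.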
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowerCamelCase__ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowerCamelCase__ ):
http_head("""https://huggingface.co""" ) | 686 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :int = {}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'llama'
__SCREAMING_SNAKE_CASE : Any = ['past_key_values']
def __init__(self , lowercase=32000 , lowercase=4096 , lowercase=11008 , lowercase=32 , lowercase=32 , lowercase=None , lowercase="silu" , lowercase=2048 , lowercase=0.02 , lowercase=1E-6 , lowercase=True , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=1 , lowercase=False , lowercase=None , **lowercase , ):
A_ : str = vocab_size
A_ : Optional[Any] = max_position_embeddings
A_ : Any = hidden_size
A_ : Any = intermediate_size
A_ : Tuple = num_hidden_layers
A_ : Dict = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
A_ : str = num_attention_heads
A_ : Optional[Any] = num_key_value_heads
A_ : Optional[int] = hidden_act
A_ : Dict = initializer_range
A_ : str = rms_norm_eps
A_ : Dict = pretraining_tp
A_ : Optional[Any] = use_cache
A_ : Optional[Any] = rope_scaling
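        # rope_scaling, when provided, must look like {"type": "linear" | "dynamic", "factor": float > 1.0}.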
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase , )
def _a (self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F'got {self.rope_scaling}' )
A_ : Dict = self.rope_scaling.get("""type""" , lowercase )
A_ : Any = self.rope_scaling.get("""factor""" , lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(lowercase , lowercase ) or rope_scaling_factor <= 1.0:
                raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' ) | 717 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
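# Whitespace is stripped before hashing so that formatting-only duplicates collapse to the same hash.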
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
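# An example is kept only if its hash is unique and it passes every heuristic computed above.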
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def _a (*lowercase , **lowercase ):
pass
@is_pipeline_test
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _a (self , lowercase , lowercase , lowercase ):
A_ : str = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
A_ : Optional[int] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def _a (self , lowercase , lowercase ):
A_ : Dict = object_detector(examples[0] , threshold=0.0 )
A_ : List[Any] = len(lowerCamelCase__ )
self.assertGreater(lowerCamelCase__ , 0 )
self.assertEqual(
lowerCamelCase__ , [
{
"""score""": ANY(lowerCamelCase__ ),
"""label""": ANY(lowerCamelCase__ ),
"""box""": {"""xmin""": ANY(lowerCamelCase__ ), """ymin""": ANY(lowerCamelCase__ ), """xmax""": ANY(lowerCamelCase__ ), """ymax""": ANY(lowerCamelCase__ )},
}
for i in range(lowerCamelCase__ )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _a (self ):
pass
@require_torch
def _a (self ):
A_ : Tuple = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
A_ : Union[str, Any] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
A_ : Union[str, Any] = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def _a (self ):
A_ : int = pipeline("""zero-shot-object-detection""" )
A_ : Tuple = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
A_ : str = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _a (self ):
pass
@require_torch
@slow
def _a (self ):
A_ : Union[str, Any] = 0.2
A_ : Optional[Any] = pipeline("""zero-shot-object-detection""" )
A_ : Tuple = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCamelCase__ , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def _a (self ):
A_ : Dict = 2
A_ : int = pipeline("""zero-shot-object-detection""" )
A_ : List[str] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCamelCase__ , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , ) | 718 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 0 |
import random
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
'''simple docstring'''
A_ : dict = {i: [] for i in range(snake_case_ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(snake_case_ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(snake_case_ ):
for j in range(i + 1 , snake_case_ ):
if random.random() < probability:
graph[i].append(snake_case_ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(snake_case_ )
return graph
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {
i: [j for j in range(snake_case_ ) if i != j] for i in range(snake_case_ )
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 719 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 0 |
'''simple docstring'''
import os
import sys
lowerCamelCase :List[Any] = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase :Any = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ ) | 720 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 0 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
lowerCamelCase :List[str] = threading.Lock()
lowerCamelCase :Optional[int] = None
lowerCamelCase :Tuple = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
lowerCamelCase :Optional[int] = logging.WARNING
lowerCamelCase :List[str] = True
def a ( ):
'''simple docstring'''
A_ : Any = os.getenv("""TRANSFORMERS_VERBOSITY""" , lowerCamelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
f'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def a ( ):
'''simple docstring'''
return __name__.split(""".""" )[0]
def a ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def a ( ):
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
A_ : Any = logging.StreamHandler() # Set sys.stderr as stream.
A_ : Tuple = sys.stderr.flush
# Apply our default configuration to the library root logger.
A_ : List[str] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
A_ : Optional[int] = False
def a ( ):
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
A_ : Optional[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
A_ : Any = None
def a ( ):
'''simple docstring'''
return log_levels
def a ( lowerCamelCase__ = None ):
'''simple docstring'''
if name is None:
A_ : int = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a ( lowerCamelCase__ ):
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a ( lowerCamelCase__ ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
_configure_library_root_logger()
A_ : Optional[int] = False
def a ( ):
'''simple docstring'''
_configure_library_root_logger()
A_ : Optional[int] = True
def a ( ):
'''simple docstring'''
A_ : List[str] = _get_library_root_logger().handlers
for handler in handlers:
A_ : str = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(lowerCamelCase__ )
def a ( ):
'''simple docstring'''
A_ : Optional[int] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(lowerCamelCase__ )
def a ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , lowerCamelCase__ )
if no_advisory_warnings:
return
self.warning(*lowerCamelCase__ , **lowerCamelCase__ )
lowerCamelCase :Tuple = warning_advice
@functools.lru_cache(lowerCamelCase__ )
def a ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
self.warning(*lowerCamelCase__ , **lowerCamelCase__ )
lowerCamelCase :Tuple = warning_once
class _lowerCAmelCase :
def __init__(self , *lowercase , **lowercase ): # pylint: disable=unused-argument
A_ : Any = args[0] if args else None
def __iter__(self ):
return iter(self._iterator )
def __getattr__(self , lowercase ):
def empty_fn(*lowercase , **lowercase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ):
return self
def __exit__(self , lowercase , lowercase , lowercase ):
return
class _lowerCAmelCase :
def __call__(self , *lowercase , **lowercase ):
if _tqdm_active:
return tqdm_lib.tqdm(*lowercase , **lowercase )
else:
return EmptyTqdm(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
A_ : int = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowercase , **lowercase )
def _a (self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCamelCase :Optional[int] = _tqdm_cls()
def a ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def a ( ):
'''simple docstring'''
global _tqdm_active
A_ : Dict = True
hf_hub_utils.enable_progress_bars()
def a ( ):
'''simple docstring'''
global _tqdm_active
A_ : List[Any] = False
hf_hub_utils.disable_progress_bars() | 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 0 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
A_ : Any = """"""
A_ : Any = """"""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(lowerCamelCase__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
A_ : str = 0, 0
# length[i] shows the length of palindromic substring with center i
A_ : List[str] = [1 for i in range(len(lowerCamelCase__ ) )]
# for each character in new_string find corresponding palindromic string
A_ : Dict = 0
for j in range(len(lowerCamelCase__ ) ):
A_ : Optional[int] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(lowerCamelCase__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
A_ : List[str] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
A_ : Any = j - k + 1 # noqa: E741
A_ : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
A_ : str = length[j]
A_ : List[Any] = j
# create that string
A_ : Optional[Any] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 686 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase :Any = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase :List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "://" in dataset_path:
A_ : Union[str, Any] = dataset_path.split("""://""" )[1]
return dataset_path
def a ( lowerCamelCase__ ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = not is_remote_filesystem(lowerCamelCase__ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCamelCase__ ) , fs._strip_protocol(lowerCamelCase__ ) )
else:
fs.mv(lowerCamelCase__ , lowerCamelCase__ , recursive=lowerCamelCase__ )
def a ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
A_ : Tuple = None
A_ : Union[str, Any] = None
A_ : List[Any] = threading.Lock() | 701 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 686 | 0 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
A_ : List[Any] = True
for i in range(0 , len(lowerCamelCase__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
A_ : int = input_list[i + 1], input_list[i]
# swapping if elements not in order
A_ : Any = False
for i in range(1 , len(lowerCamelCase__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
A_ : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
A_ : Dict = False
return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
lowerCamelCase :List[Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCamelCase :Union[str, Any] = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 702 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def a ( lowerCamelCase__ , lowerCamelCase__=10_00 ) -> Optional[Any]:
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
A_ : Optional[int] = n - 1
A_ : Optional[int] = 0
while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
exp += 1
# n - 1=d*(2**exp)
A_ : Optional[Any] = 0
while count < prec:
A_ : List[str] = random.randint(2 , n - 1 )
A_ : str = bin_exp_mod(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if b != 1:
A_ : Optional[Any] = True
for _ in range(lowerCamelCase__ ):
if b == n - 1:
A_ : Optional[int] = False
break
A_ : Any = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCamelCase :Optional[int] = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
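# For reference, a de-obfuscated sketch of the same Miller-Rabin round logic
# with plain names (`miller_rabin` is our label, not a name from the file
# above); `prec` rounds bound the false-positive probability by about 4**-prec.
def miller_rabin(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:  # factor n - 1 as d * 2**exp with d odd
        d //= 2
        exp += 1
    for _ in range(prec):
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b == 1:
            continue  # this witness says "probably prime"
        for _ in range(exp):
            if b == n - 1:
                break  # probably prime for this witness
            b = b * b % n
        else:
            return False  # a proves n composite
    return True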
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
A_ : Any = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
    )
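    # Sanity check (a small sketch): every combination returned by
    # all_construct must concatenate back to the target string, e.g.
    # all_construct("abc", ["a", "bc", "ab", "c"]) -> [["a", "bc"], ["ab", "c"]].
    for way in all_construct("abc", ["a", "bc", "ab", "c"]):
        assert "".join(way) == "abc"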
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCamelCase :Optional[List[str]] = None
lowerCamelCase :Any = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCamelCase :str = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
__SCREAMING_SNAKE_CASE : ClassVar[str] = "PIL.Image.Image"
__SCREAMING_SNAKE_CASE : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__SCREAMING_SNAKE_CASE : str = field(default='Image' , init=__UpperCAmelCase , repr=__UpperCAmelCase )
def __call__(self ):
return self.pa_type
def _a (self , lowercase ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = np.array(lowercase )
if isinstance(lowercase , lowercase ):
return {"path": value, "bytes": None}
elif isinstance(lowercase , lowercase ):
return {"path": None, "bytes": value}
elif isinstance(lowercase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase )
elif isinstance(lowercase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _a (self , lowercase , lowercase=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
A_ : Union[str, Any] = {}
        A_, A_ : Optional[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(lowercase ):
A_ : Optional[int] = PIL.Image.open(lowercase )
else:
A_ : Optional[int] = path.split("""::""" )[-1]
try:
A_ : Tuple = string_to_dict(lowercase , config.HUB_DATASETS_URL )["""repo_id"""]
A_ : List[str] = token_per_repo_id.get(lowercase )
except ValueError:
A_ : Optional[int] = None
with xopen(lowercase , """rb""" , use_auth_token=lowercase ) as f:
A_ : str = BytesIO(f.read() )
A_ : str = PIL.Image.open(bytes_ )
else:
A_ : List[str] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _a (self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def _a (self , lowercase ):
if pa.types.is_string(storage.type ):
A_ : int = pa.array([None] * len(lowercase ) , type=pa.binary() )
A_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : Optional[Any] = pa.array([None] * len(lowercase ) , type=pa.string() )
A_ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
A_ : int = storage.field("""bytes""" )
else:
A_ : List[Any] = pa.array([None] * len(lowercase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
A_ : List[Any] = storage.field("""path""" )
else:
A_ : str = pa.array([None] * len(lowercase ) , type=pa.string() )
A_ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ : List[Any] = pa.array(
[encode_np_array(np.array(lowercase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ : Dict = pa.array([None] * len(lowercase ) , type=pa.string() )
A_ : Union[str, Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
def _a (self , lowercase ):
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , """rb""" ) as f:
A_ : int = f.read()
return bytes_
A_ : int = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : List[str] = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
A_ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
def a ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
A_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = BytesIO()
if image.format in list_image_compression_formats():
A_ : List[str] = image.format
else:
A_ : Optional[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(lowerCamelCase__ , format=lowerCamelCase__ )
return buffer.getvalue()
def a ( lowerCamelCase__ ):
'''simple docstring'''
if hasattr(lowerCamelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
A_ : List[Any] = array.dtype
A_ : Dict = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
A_ : List[Any] = dtype.kind
A_ : str = dtype.itemsize
A_ : str = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A_ : Union[str, Any] = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A_ : Optional[int] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A_ : Optional[int] = dtype_byteorder + dtype_kind + str(lowerCamelCase__ )
A_ : Dict = np.dtype(lowerCamelCase__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
A_ : Dict = PIL.Image.fromarray(array.astype(lowerCamelCase__ ) )
return {"path": None, "bytes": image_to_bytes(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
A_ : Union[str, Any] = first_non_null_value(lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCamelCase__ , np.ndarray ):
A_ : Optional[Any] = no_op_if_value_is_null(lowerCamelCase__ )
return [obj_to_image_dict_func(lowerCamelCase__ ) for obj in objs]
elif isinstance(lowerCamelCase__ , PIL.Image.Image ):
A_ : str = no_op_if_value_is_null(lowerCamelCase__ )
return [obj_to_image_dict_func(lowerCamelCase__ ) for obj in objs]
else:
return objs
else:
        return objs
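# A minimal round-trip sketch for the feature above, assuming the public
# `datasets` API (Dataset / Features / Image); a tiny image is synthesized so
# the snippet is self-contained and needs only Pillow.
def _image_feature_demo():
    import PIL.Image
    from datasets import Dataset, Features
    from datasets.features import Image as ImageFeature

    img = PIL.Image.new("RGB", (4, 4), color=(255, 0, 0))
    ds = Dataset.from_dict({"image": [img]}, features=Features({"image": ImageFeature()}))
    decoded = ds[0]["image"]  # stored as {bytes, path}, decoded back to PIL on access
    assert decoded.size == (4, 4)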
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = []
A_ : int = set({"""(""", """[""", """{"""} )
A_ : Union[str, Any] = set({""")""", """]""", """}"""} )
A_ : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowerCamelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCamelCase__ ) == 0 or (len(lowerCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase__ ) == 0
def a ( ):
'''simple docstring'''
A_ : int = input("""Enter sequence of brackets: """ )
if is_balanced(lowerCamelCase__ ):
print(lowerCamelCase__ , """is balanced""" )
else:
print(lowerCamelCase__ , """is not balanced""" )
if __name__ == "__main__":
    main()
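# A compact equivalent sketch of is_balanced for reference: matching pairs
# are looked up as the stack unwinds (same semantics as the function above).
def is_balanced_compact(s):
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert is_balanced_compact("{[()]}") and not is_balanced_compact("([)]")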
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'audio-spectrogram-transformer'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=1024 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : int = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Tuple = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Tuple = layer_norm_eps
A_ : List[Any] = patch_size
A_ : Optional[Any] = qkv_bias
A_ : Tuple = frequency_stride
A_ : str = time_stride
A_ : Tuple = max_length
        A_ : List[str] = num_mel_bins
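# Usage sketch: upstream this class is transformers' ASTConfig (the name is
# collapsed to `_lowerCAmelCase` in this dump). Assuming that public name,
# a smaller-than-default model could be configured as:
#
#     from transformers import ASTConfig, ASTModel
#     config = ASTConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
#     model = ASTModel(config)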
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Optional[int] = field
A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : Optional[Any] = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : int = None
A_ : Union[str, Any] = None
A_ : int = None
A_ : List[str] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : str = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
def _a (self ):
A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : List[str] = args
A_ : List[str] = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Any = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
        return written
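# End-to-end sketch of the reader/writer pair above through the public
# `datasets` API (Dataset.to_json drives a writer like the one above, and
# load_dataset("json", ...) drives the reader). `path` is a placeholder for
# any writable location.
def _json_roundtrip_demo(path):
    from datasets import Dataset, load_dataset

    ds = Dataset.from_dict({"a": [1, 2], "b": ["x", "y"]})
    ds.to_json(path, lines=True)  # one JSON object per line (JSON Lines)
    reloaded = load_dataset("json", data_files=path, split="train")
    assert reloaded.to_dict() == ds.to_dict()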
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase :List[Any] = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def _a (cls ):
A_ : Any = TOKEN
HfFolder.save_token(lowercase )
@classmethod
def _a (cls ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _a (self ):
A_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ : List[str] = FlaxBertModel(lowercase )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
A_ : List[str] = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
A_ : Dict = flatten_dict(unfreeze(model.params ) )
A_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase , repo_id="""test-model-flax""" , push_to_hub=lowercase , use_auth_token=self._token )
A_ : List[Any] = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
A_ : str = flatten_dict(unfreeze(model.params ) )
A_ : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
def _a (self ):
A_ : Tuple = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ : int = FlaxBertModel(lowercase )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
A_ : Dict = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A_ : str = flatten_dict(unfreeze(model.params ) )
A_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowercase , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=lowercase , use_auth_token=self._token )
A_ : Union[str, Any] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A_ : List[str] = flatten_dict(unfreeze(model.params ) )
A_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = True
    A_ : Any = flatten_dict(modela.params )
    A_ : Any = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
A_ : int = False
return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ : Any = FlaxBertModel(lowercase )
A_ : List[str] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase , lowercase ) )
with self.assertRaises(lowercase ):
A_ : Dict = FlaxBertModel.from_pretrained(lowercase )
A_ : Tuple = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
self.assertTrue(check_models_equal(lowercase , lowercase ) )
def _a (self ):
A_ : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ : Tuple = FlaxBertModel(lowercase )
A_ : Optional[Any] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase , lowercase ) , max_shard_size="""10KB""" )
with self.assertRaises(lowercase ):
A_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase )
A_ : Optional[Any] = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
self.assertTrue(check_models_equal(lowercase , lowercase ) )
def _a (self ):
A_ : Tuple = """bert"""
A_ : Union[str, Any] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowercase ):
A_ : Dict = FlaxBertModel.from_pretrained(lowercase )
A_ : Union[str, Any] = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
self.assertIsNotNone(lowercase )
def _a (self ):
A_ : Union[str, Any] = """bert"""
A_ : Union[str, Any] = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowercase ):
A_ : List[Any] = FlaxBertModel.from_pretrained(lowercase )
A_ : Tuple = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
        self.assertIsNotNone(lowercase )
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[int] = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = tmp_path / """cache"""
A_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[Any] = features.copy() if features else default_expected_features
A_ : Optional[int] = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Dict = ParquetDatasetReader(lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = tmp_path / """cache"""
A_ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Any = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , split=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if issubclass(lowerCamelCase__ , lowerCamelCase__ ):
A_ : List[str] = parquet_path
elif issubclass(lowerCamelCase__ , lowerCamelCase__ ):
A_ : int = [parquet_path]
A_ : List[Any] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : str = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=("train",) ):
'''simple docstring'''
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
for split in splits:
A_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = tmp_path / """cache"""
A_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = tmp_path / """cache"""
A_ : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : List[Any] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : str = ParquetDatasetReader({"""train""": parquet_path} , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if split:
A_ : Tuple = {split: parquet_path}
else:
A_ : List[Any] = """train"""
A_ : str = {"""train""": parquet_path, """test""": parquet_path}
A_ : List[Any] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[Any] = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = ParquetDatasetWriter(lowerCamelCase__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
A_ : Dict = pq.ParquetFile(tmp_path / """foo.parquet""" )
A_ : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
A_ : Tuple = {"""image""": [image_path]}
A_ : Tuple = Features({"""image""": Image()} )
A_ : str = Dataset.from_dict(lowerCamelCase__ , features=lowerCamelCase__ )
A_ : List[str] = ParquetDatasetWriter(lowerCamelCase__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
A_ : Optional[Any] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
A_ : List[str] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=lowerCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert get_writer_batch_size(lowerCamelCase__ ) == expected
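# A minimal sketch of the public round-trip these tests exercise
# (Dataset.to_parquet / Dataset.from_parquet wrap the writer/reader above).
def test_parquet_roundtrip_sketch(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ds.to_parquet(str(tmp_path / "data.parquet"))
    reloaded = Dataset.from_parquet(str(tmp_path / "data.parquet"))
    assert reloaded.to_dict() == ds.to_dict()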
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Any = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from torch import nn
def a ( lowerCamelCase__ ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
        raise ValueError(f'Unsupported activation function: {act_fn}' )
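# Usage sketch for the resolver above (`a` is this dump's name for what is
# get_activation upstream): map a config string to a module and apply it.
if __name__ == "__main__":
    import torch

    act = a("silu")
    print(act(torch.randn(2, 3)))  # elementwise SiLU of a random tensor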
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase :Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def a ( lowerCamelCase__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCamelCase :List[Any] = parser.parse_args()
if args.check_lib:
lowerCamelCase :Union[str, Any] = importlib.import_module('''transformers''')
lowerCamelCase :Union[str, Any] = Path(transformers_module.__file__).parent
else:
lowerCamelCase :List[str] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=10 , lowercase=3 , lowercase=2 , lowercase=2 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase="divided_space_time" , lowercase=None , ):
A_ : Tuple = parent
A_ : Optional[int] = batch_size
A_ : Optional[int] = image_size
A_ : str = num_channels
A_ : Any = patch_size
A_ : Union[str, Any] = num_frames
A_ : Dict = is_training
A_ : Dict = use_labels
A_ : List[str] = hidden_size
A_ : int = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : int = intermediate_size
A_ : Any = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : List[str] = attention_type
A_ : str = initializer_range
A_ : List[Any] = scope
A_ : Dict = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
A_ : List[Any] = (image_size // patch_size) ** 2
A_ : Any = (num_frames) * self.num_patches_per_frame + 1
def _a (self ):
A_ : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A_ : str = None
if self.use_labels:
A_ : int = ids_tensor([self.batch_size] , self.num_labels )
A_ : int = self.get_config()
return config, pixel_values, labels
def _a (self ):
A_ : List[str] = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
A_ : List[str] = self.num_labels
return config
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = TimesformerModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[int] = TimesformerForVideoClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = model(lowercase )
# verify the logits shape
A_ : int = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase )
def _a (self ):
A_ : Tuple = self.prepare_config_and_inputs()
        A_, A_, A_ : str = config_and_inputs  # config, pixel_values, labels
A_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Dict = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : int = False
def _a (self ):
A_ : int = TimesformerModelTester(self )
A_ : Dict = ConfigTester(
self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self , lowercase , lowercase , lowercase=False ):
A_ : Any = copy.deepcopy(lowercase )
if return_labels:
if model_class in get_values(lowercase ):
A_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
        A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
        A_, A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = model_class(lowercase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase )
@slow
def _a (self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = TimesformerModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _a (self ):
if not self.has_attentions:
pass
else:
            A_, A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = True
for model_class in self.all_model_classes:
A_ : List[Any] = self.model_tester.seq_length
A_ : Union[str, Any] = self.model_tester.num_frames
A_ : Any = True
A_ : int = False
A_ : List[str] = True
A_ : Any = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : int = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : Optional[int] = True
A_ : Optional[Any] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Dict = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
A_ : Optional[int] = len(lowercase )
# Check attention is always last and order is fine
A_ : str = True
A_ : Optional[int] = True
A_ : List[Any] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + 1 , len(lowercase ) )
A_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[str] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Union[str, Any] = outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase ) , lowercase )
A_ : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Dict = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def a ( ):
'''simple docstring'''
A_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
A_ : List[str] = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a (self ):
A_ : Union[str, Any] = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase )
A_ : List[Any] = self.default_image_processor
A_ : Union[str, Any] = prepare_video()
A_ : List[Any] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : Tuple = model(**lowercase )
# verify the logits
A_ : Union[str, Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : str = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
'''simple docstring'''
lowerCamelCase :dict[tuple[int, int, int], int] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : int = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : Optional[int] = _calculate(days - 1 , lowerCamelCase__ , 0 )
A_ : Optional[Any] = state_late + state_absent + state_ontime
    cache[key] = prizestrings  # memoise this state before returning
return prizestrings
def a ( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
if __name__ == "__main__":
    print(solution())
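# An equivalent sketch using functools caching instead of the hand-rolled
# dict; same recurrence as above (consecutive lates accumulate and reset on
# any other day, absences accumulate for good). `_prize_strings` is our name.
from functools import lru_cache

@lru_cache(maxsize=None)
def _prize_strings(days, absent, late):
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _prize_strings(days - 1, absent, late + 1)  # late today
        + _prize_strings(days - 1, absent + 1, 0)  # absent today
        + _prize_strings(days - 1, absent, 0)  # on time today
    )

assert _prize_strings(4, 0, 0) == 43  # matches the known 4-day count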
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def count_of_possible_combinations(lowerCamelCase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
lowerCamelCase__ , lowerCamelCase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A_ : int = sum(
count_of_possible_combinations_with_dp_array(target - item , lowerCamelCase__ )
for item in array )
        dp_array[target] = answer  # memoise the subproblem result
return answer
A_ : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = [0] * (target + 1)
    dp_array[0] = 1  # base case: one way to reach 0 (choose nothing)
for i in range(1 , target + 1 ):
for j in range(lowerCamelCase__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = 3
lowerCamelCase = 5
lowerCamelCase = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
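# Note: the three `a` definitions above correspond, upstream, to
# combination_sum_iv, combination_sum_iv_dp_array and
# combination_sum_iv_bottom_up (names assumed from the call in the main
# guard). All realise the same recurrence
#     ways(t) = sum(ways(t - x) for x in array if x <= t), with ways(0) = 1,
# and differ only in evaluation order: plain recursion is exponential, while
# the memoised and bottom-up variants run in O(target * len(array)) time.
# For array=[1, 2, 5] and target=5 the printed answer is 9, counting ordered
# compositions: 1+1+1+1+1; 1+1+1+2 in 4 orders; 1+2+2 in 3 orders; and 5.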
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : Union[str, Any] = rule_str.split(""":""" )
A_ : Union[str, Any] = int(lowerCamelCase__ )
A_ : List[Any] = float(lowerCamelCase__ )
        rules_dict[steps] = value
A_ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ):
'''simple docstring'''
A_ : Optional[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : str = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase :List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ):
'''simple docstring'''
A_ : Optional[Any] = SchedulerType(lowerCamelCase__ )
A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) | 686 | 0 |
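# ---------------------------------------------------------------------------
# Hedged usage sketch (added, commented out; not part of the original file).
# Judging by TYPE_TO_SCHEDULER_FUNCTION above, the dispatcher defined last is
# `get_scheduler` before name obfuscation. The model/optimizer below are
# illustrative placeholders and `torch` is assumed to be importable.
#
# import torch
# model = torch.nn.Linear(10, 2)
# optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
# scheduler = get_scheduler(
#     SchedulerType.LINEAR, optimizer, num_warmup_steps=100, num_training_steps=1000
# )
# for _ in range(1000):
#     optimizer.step()      # update weights first
#     scheduler.step()      # then advance the LR schedule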
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (('num_inference_steps', 25),)
def _a (self , **lowercase ):
A_ : List[Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**lowercase )
return config
def _a (self , lowercase=0 , **lowercase ):
A_ : List[Any] = dict(self.forward_default_kwargs )
A_ : Optional[Any] = kwargs.pop("""num_inference_steps""" , lowercase )
A_ : Dict = self.dummy_sample
A_ : Tuple = 0.1 * sample
A_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A_ : List[Any] = self.get_scheduler_config(**lowercase )
A_ : List[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
A_ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
A_ : str = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
A_ : str = dummy_past_residuals[: new_scheduler.config.solver_order]
                A_, A_ : List[str] = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
A_ : str = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Tuple = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a (self , lowercase=0 , **lowercase ):
A_ : Optional[Any] = dict(self.forward_default_kwargs )
A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , lowercase )
A_ : List[Any] = self.dummy_sample
A_ : Optional[int] = 0.1 * sample
A_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
A_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
A_ : Optional[int] = scheduler_class.from_pretrained(lowercase )
                # set timesteps before copying over the dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
A_ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A_ : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Optional[Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a (self , lowercase=None , **lowercase ):
if scheduler is None:
A_ : List[str] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(**lowercase )
A_ : str = scheduler_class(**lowercase )
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(**lowercase )
A_ : Optional[Any] = scheduler_class(**lowercase )
A_ : List[str] = 10
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : List[str] = model(lowercase , lowercase )
A_ : str = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def _a (self ):
A_ : Optional[Any] = dict(self.forward_default_kwargs )
A_ : str = kwargs.pop("""num_inference_steps""" , lowercase )
for scheduler_class in self.scheduler_classes:
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**lowercase )
A_ : List[str] = self.dummy_sample
A_ : Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , """set_timesteps""" ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , """set_timesteps""" ):
A_ : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A_ : str = dummy_past_residuals[: scheduler.config.solver_order]
A_ : Union[str, Any] = scheduler.timesteps[5]
A_ : Dict = scheduler.timesteps[6]
A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a (self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A_ : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A_ : List[str] = self.full_loop(scheduler=lowercase )
A_ : Any = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
A_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A_ : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
A_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A_ : str = UniPCMultistepScheduler.from_config(scheduler.config )
A_ : int = self.full_loop(scheduler=lowercase )
A_ : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
def _a (self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def _a (self ):
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , solver_order=lowercase , solver_type=lowercase , )
def _a (self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def _a (self ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
A_ : Tuple = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def _a (self ):
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def _a (self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def _a (self ):
A_ : Optional[Any] = self.full_loop()
A_ : Optional[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
def _a (self ):
A_ : Optional[Any] = self.full_loop(prediction_type="""v_prediction""" )
A_ : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.10_14 ) < 1E-3
def _a (self ):
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
A_ : str = scheduler_class(**lowercase )
A_ : Tuple = 10
A_ : List[Any] = self.dummy_model()
A_ : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : Optional[int] = model(lowercase , lowercase )
A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.floataa
def _a (self , **lowercase ):
for scheduler_class in self.scheduler_classes:
A_ : Tuple = self.get_scheduler_config(**lowercase )
A_ : Dict = scheduler_class(**lowercase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps | 711 |
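# ---------------------------------------------------------------------------
# Hedged sketch (added, commented out): the config round trip the tests above
# exercise, shown standalone. Class names come from the `diffusers` imports at
# the top of this file; the constructor arguments are illustrative.
#
# sched = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2)
# sched = DPMSolverMultistepScheduler.from_config(sched.config)  # swap family
# sched = UniPCMultistepScheduler.from_config(sched.config)      # and back
# sched.set_timesteps(10)
# print(sched.timesteps)  # 10 inference timesteps drawn from the 1000 train steps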
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
            f'Shape of hf {key + ("." + weight_type if weight_type is not None else "")} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 0 |
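# ---------------------------------------------------------------------------
# Example invocation (added; the script filename and output directory are
# hypothetical, the checkpoint filename comes from the URL comment block at
# the top of this file, and the flags are the ones defined by argparse above):
#
# python convert_encodec_checkpoint.py \
#     --model encodec_24khz \
#     --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#     --pytorch_dump_folder_path ./encodec_24khz_converted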
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , ):
'''simple docstring'''
A_ : Optional[Any] = {}
if train_file is not None:
A_ : int = [train_file]
if eval_file is not None:
A_ : Any = [eval_file]
if test_file is not None:
A_ : Optional[int] = [test_file]
A_ : Union[str, Any] = datasets.load_dataset("""csv""" , data_files=lowerCamelCase__ )
A_ : Dict = list(ds[list(files.keys() )[0]].features.keys() )
A_ : str = features_name.pop(lowerCamelCase__ )
A_ : Tuple = list(set(ds[list(files.keys() )[0]][label_name] ) )
A_ : Dict = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[int] = tokenizer.model_input_names
A_ : Any = {}
if len(lowerCamelCase__ ) == 1:
for k in files.keys():
A_ : Optional[int] = ds[k].map(
lambda lowerCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" ) , batched=lowerCamelCase__ , )
elif len(lowerCamelCase__ ) == 2:
for k in files.keys():
A_ : Union[str, Any] = ds[k].map(
lambda lowerCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" , ) , batched=lowerCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A_ : int = {k: v for k, v in ex.items() if k in input_names}
A_ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A_ : List[str] = {k: v for k, v in ex.items() if k in input_names}
A_ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A_ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
A_ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
A_ : int = (
tf.data.Dataset.from_generator(
lowerCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A_ : Union[str, Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A_ : Optional[Any] = (
tf.data.Dataset.from_generator(
lowerCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A_ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A_ : str = (
tf.data.Dataset.from_generator(
lowerCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A_ : List[str] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCamelCase :Any = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int = field(metadata={'help': 'Which column contains the label'} )
__SCREAMING_SNAKE_CASE : str = field(default=__UpperCAmelCase , metadata={'help': 'The path of the training file'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__UpperCAmelCase , metadata={'help': 'The path of the development file'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__UpperCAmelCase , metadata={'help': 'The path of the test file'} )
__SCREAMING_SNAKE_CASE : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : bool = field(default=__UpperCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def a ( ):
'''simple docstring'''
A_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    A_, A_, A_ : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    A_, A_, A_, A_ : int = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
A_ : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCamelCase__ ) , labelaid=lowerCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
A_ : Tuple = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCamelCase__ ) -> Dict:
A_ : int = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A_ : Union[str, Any] = TFTrainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A_ : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A_ : Union[str, Any] = trainer.evaluate()
A_ : int = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(lowerCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(lowerCamelCase__ )
return results
if __name__ == "__main__":
main() | 712 |
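# ---------------------------------------------------------------------------
# Example invocation (added; the script filename and CSV paths are
# hypothetical, and the flag names are inferred from the attribute accesses
# above, e.g. `data_args.train_file` and `data_args.label_column_id`):
#
# python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --label_column_id 0 \
#     --train_file train.csv \
#     --dev_file dev.csv \
#     --output_dir ./tf_clf_out \
#     --do_train --do_eval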
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 0 |
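# ---------------------------------------------------------------------------
# Hedged sketch (added, commented out): instantiating the config above and
# inspecting its ONNX export metadata. The deobfuscated class names are
# assumed to be `BeitConfig` and `BeitOnnxConfig` (matching the checkpoint map
# at the top of the file), and `atol_for_validation` is assumed to be the
# deobfuscated name of the 1e-4 property.
#
# config = BeitConfig(image_size=224, patch_size=16)
# onnx_config = BeitOnnxConfig(config)
# print(onnx_config.inputs)               # pixel_values: batch/num_channels/height/width
# print(onnx_config.atol_for_validation)  # 1e-4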
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = set()
# edges = list of graph's edges
A_ : str = get_edges(lowerCamelCase__ )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge incident to from_node or to_node
while edges:
        A_, A_ : Dict = edges.pop()
chosen_vertices.add(lowerCamelCase__ )
chosen_vertices.add(lowerCamelCase__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowerCamelCase__ )
return chosen_vertices
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 713 |
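# ---------------------------------------------------------------------------
# Hedged sketch (added, commented out): the example above run end to end,
# assuming the two functions deobfuscate to `matching_min_vertex_cover` and
# `get_edges`.
#
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# cover = matching_min_vertex_cover(graph)
# # every edge (u, v) has at least one endpoint in the cover
# assert all(u in cover or v in cover for u in graph for v in graph[u])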
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 0 |
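# ---------------------------------------------------------------------------
# Example invocation (added; the script filename and output path are
# hypothetical, `gwf-440k` is one of the official names in MODELS_MAP above,
# so the checkpoint is downloaded automatically when --model_path is not a
# local file):
#
# python convert_dance_diffusion_to_diffusers.py \
#     --model_path gwf-440k \
#     --checkpoint_path ./gwf-440k-diffusers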
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :List[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase :List[Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : int = LEDTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
A_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Dict = getattr(lowercase , pre_tok_state.pop("""type""" ) )
A_ : List[Any] = add_prefix_space
A_ : Dict = pre_tok_class(**lowercase )
A_ : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : Any = """post_processor"""
A_ : Dict = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
A_ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Optional[int] = tuple(state["""sep"""] )
if "cls" in state:
A_ : List[str] = tuple(state["""cls"""] )
A_ : List[Any] = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Optional[Any] = add_prefix_space
A_ : Optional[Any] = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
A_ : List[Any] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : List[Any] = getattr(lowercase , state.pop("""type""" ) )
A_ : List[Any] = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _a (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a (self , lowercase ):
A_ : int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
A_ : List[Any] = value
def _a (self , *lowercase , **lowercase ):
A_ : List[str] = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : int = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def _a (self , lowercase , lowercase=None ):
A_ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a (self , lowercase , lowercase = None ):
A_ : Dict = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a (self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ):
A_ : Optional[int] = super()._pad(
encoded_inputs=lowercase , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
# Load from model defaults
if return_attention_mask is None:
A_ : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A_ : Union[str, Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowercase )
if needs_to_be_padded:
A_ : Dict = len(lowercase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ : Dict = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
A_ : Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 714 |
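# ---------------------------------------------------------------------------
# Hedged sketch (added, commented out): padding with a `global_attention_mask`,
# which the `_pad` override above extends with -1 (meaning "local attention")
# rather than 0. The deobfuscated class name is assumed to be
# `LEDTokenizerFast`.
#
# tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tok("Summarize this document.")
# enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
# padded = tok.pad(enc, padding="max_length", max_length=32)
# # padded["global_attention_mask"] now has length 32, tail filled with -1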
'''simple docstring'''
from math import factorial
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if successes > trials:
        raise ValueError("""successes must be lower than or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        raise ValueError("""trials and successes must be integers""" )
if not 0 < prob < 1:
        raise ValueError("""prob has to be in the range (0, 1)""" )
A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
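# ---------------------------------------------------------------------------
# Hedged sanity check (added, commented out): summed over every possible
# success count, the probabilities must add up to 1. Uses the
# `binomial_distribution` name already referenced in the __main__ block above.
#
# total = sum(binomial_distribution(k, 4, 0.75) for k in range(5))
# print(f"sum over k=0..4: {total}")  # ~1.0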
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase :Any = logging.get_logger(__name__)
# General docstring
lowerCamelCase :Optional[int] = '''RegNetConfig'''
# Base docstring
lowerCamelCase :str = '''facebook/regnet-y-040'''
lowerCamelCase :int = [1, 1_0_8_8, 7, 7]
# Image classification docstring
lowerCamelCase :Union[str, Any] = '''facebook/regnet-y-040'''
lowerCamelCase :Any = '''tabby, tabby cat'''
lowerCamelCase :Optional[int] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : Optional[Any] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding="""VALID""" , groups=lowercase , use_bias=lowercase , name="""convolution""" , )
A_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def _a (self , lowercase ):
A_ : Union[str, Any] = self.convolution(self.padding(lowercase ) )
A_ : Any = self.normalization(lowercase )
A_ : Tuple = self.activation(lowercase )
return hidden_state
class _lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , lowercase , **lowercase ):
super().__init__(**lowercase )
A_ : List[Any] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def _a (self , lowercase ):
A_ : Union[str, Any] = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[Any] = self.embedder(lowercase )
return hidden_state
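# ---------------------------------------------------------------------------
# Added note: the transpose in the embedder above converts PyTorch-style NCHW
# input into the NHWC layout that `tf.keras.layers.Conv2D` expects on CPU:
#
# x = tf.random.uniform((2, 3, 224, 224))       # (batch, channels, H, W)
# tf.transpose(x, perm=(0, 2, 3, 1)).shape      # TensorShape([2, 224, 224, 3])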
class _lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , lowercase , lowercase = 2 , **lowercase ):
super().__init__(**lowercase )
A_ : Tuple = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name="""convolution""" )
A_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def _a (self , lowercase , lowercase = False ):
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class TFRegNetSELayer ( tf.keras.layers.Layer ):
def __init__(self , lowercase , lowercase , **lowercase ):
super().__init__(**lowercase )
A_ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name="""pooler""" )
A_ : List[str] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def _a (self , lowercase ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
A_ : Dict = self.pooler(lowercase )
for layer_module in self.attention:
A_ : str = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
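# RegNet "X" residual block: 1x1 reduce -> grouped 3x3 conv -> 1x1 expand, added to the (possibly projected) shortcut.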
class TFRegNetXLayer ( tf.keras.layers.Layer ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
super().__init__(**lowercase )
A_ : List[Any] = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = max(1 , out_channels // config.groups_width )
A_ : Tuple = (
TFRegNetShortCut(lowercase , stride=lowercase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Tuple = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name="""layer.2""" ),
]
A_ : str = ACTaFN[config.hidden_act]
def _a (self , lowercase ):
A_ : Any = hidden_state
for layer_module in self.layers:
A_ : Dict = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : Any = self.activation(lowercase )
return hidden_state
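# RegNet "Y" residual block: the X block with a squeeze-and-excitation layer inserted before the final 1x1 conv.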
class TFRegNetYLayer ( tf.keras.layers.Layer ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
super().__init__(**lowercase )
A_ : int = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = max(1 , out_channels // config.groups_width )
A_ : int = (
TFRegNetShortCut(lowercase , stride=lowercase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
A_ : Optional[Any] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name="""layer.3""" ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def _a (self , lowercase ):
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : Optional[int] = layer_module(lowercase )
A_ : Optional[int] = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
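# A stage stacks `depth` residual blocks; only the first block downsamples, using the given stride.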
class TFRegNetStage ( tf.keras.layers.Layer ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
super().__init__(**lowercase )
A_ : Dict = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
A_ : Any = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name="""layers.0""" ),
*[layer(lowercase , lowercase , lowercase , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def _a (self , lowercase ):
for layer_module in self.layers:
A_ : Any = layer_module(lowercase )
return hidden_state
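# Encoder: runs the input through all stages, optionally collecting every intermediate hidden state along the way.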
class TFRegNetEncoder ( tf.keras.layers.Layer ):
def __init__(self , lowercase , **lowercase ):
super().__init__(**lowercase )
A_ : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
A_ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'stages.{i+1}' ) )
def _a (self , lowercase , lowercase = False , lowercase = True ):
A_ : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : int = hidden_states + (hidden_state,)
A_ : List[str] = stage_module(lowercase )
if output_hidden_states:
A_ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
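# Main layer: embedder -> encoder -> global average pooling, with NHWC/NCHW conversions at the boundaries.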
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
__SCREAMING_SNAKE_CASE : Optional[int] = RegNetConfig
def __init__(self , lowercase , **lowercase ):
super().__init__(**lowercase )
A_ : Tuple = config
A_ : Optional[Any] = TFRegNetEmbeddings(lowercase , name="""embedder""" )
A_ : Tuple = TFRegNetEncoder(lowercase , name="""encoder""" )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name="""pooler""" )
@unpack_inputs
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
A_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Optional[int] = self.embedder(lowercase , training=lowercase )
A_ : List[str] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Tuple = encoder_outputs[0]
A_ : Dict = self.pooler(lowercase )
        # Change to NCHW output format to have uniformity in the modules
A_ : List[str] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : Union[str, Any] = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
__SCREAMING_SNAKE_CASE : Any = RegNetConfig
__SCREAMING_SNAKE_CASE : str = 'regnet'
__SCREAMING_SNAKE_CASE : Any = 'pixel_values'
@property
def _a (self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
REGNET_START_DOCSTRING = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
def __init__(self , lowercase , *lowercase , **lowercase ):
super().__init__(lowercase , *lowercase , **lowercase )
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name="""regnet""" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
A_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Optional[Any] = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
def __init__(self , lowercase , *lowercase , **lowercase ):
super().__init__(lowercase , *lowercase , **lowercase )
A_ : Optional[Any] = config.num_labels
A_ : List[Any] = TFRegNetMainLayer(lowercase , name="""regnet""" )
# classification head
A_ : Any = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a (self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
A_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : str = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Optional[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : List[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : str = self.classifier[0](lowercase )
A_ : Dict = self.classifier[1](lowercase )
A_ : Any = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states ) | 715 |
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
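    # Return the complementary strand (A<->T, C<->G); any character outside ATCG is rejected.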
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
    return lowerCamelCase__.translate(str.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
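    # Example (assumed call): a("ATCGATCG") returns "TAGCTAGC".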
doctest.testmod() | 686 | 0 |
import math
def is_prime ( number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution ( ratio = 0.1 ):
'''simple docstring'''
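    # Spiral-diagonal prime ratio (Project Euler 58): grow a number spiral layer by layer, counting primes
    # among the corner values, until primes / diagonal-values drops below `ratio`; returns the side length.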
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
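    # Example (assumed): solution() with the default ratio of 0.1 yields 26241, the known Project Euler 58 answer.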
doctest.testmod() | 716 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
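    # With simulated connection timeouts, a request without a timeout would hang forever (and is aborted
    # with RequestWouldHangIndefinitelyError), while a request with a timeout raises ConnectTimeout.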
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" ) | 686 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
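# EfficientFormer reuses ViT-style preprocessing, so these tests run against ViTImageProcessor.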
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
def __init__(self , lowercase , lowercase=13 , lowercase=3 , lowercase=224 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ):
A_ : List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
A_ : Dict = parent
A_ : Optional[Any] = batch_size
A_ : Union[str, Any] = num_channels
A_ : int = image_size
A_ : Optional[Any] = min_resolution
A_ : str = max_resolution
A_ : Optional[int] = do_resize
A_ : Optional[int] = size
A_ : Optional[int] = do_normalize
A_ : str = image_mean
A_ : Dict = image_std
def _a (self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None
def _a (self ):
A_ : int = EfficientFormerImageProcessorTester(self )
@property
def _a (self ):
return self.image_proc_tester.prepare_image_processor_dict()
def _a (self ):
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """image_mean""" ) )
self.assertTrue(hasattr(lowercase , """image_std""" ) )
self.assertTrue(hasattr(lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
def _a (self ):
pass
def _a (self ):
# Initialize image_processor
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processor(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _a (self ):
# Initialize image_processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _a (self ):
# Initialize image_processor
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
A_ : List[str] = image_processor(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , ) | 717 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
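# MD5 hash of the whitespace-stripped file content, used for exact deduplication.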
def get_hash ( example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats ( example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats ( example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}
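# Keep only the first occurrence of each hash: remove it from the set and accept; reject later duplicates.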
def check_uniques ( example , uniques ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
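# Flag files whose first `scan_width` lines contain an auto-generation marker.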
def is_autogenerated ( example , scan_width=5 ):
    '''simple docstring'''
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
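# Two-part heuristic for config/test files: keyword scan of the first lines, then keyword counts scaled by file length.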
def is_config_or_test ( example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["""content"""].count("""\n""" )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords ( example ):
    '''simple docstring'''
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments ( example , minimum=4 ):
    '''simple docstring'''
    lines = example["""content"""].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
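# Characters-per-token ratio of the tokenized content; files below `args.min_token_ratio` are filtered out later.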
def char_token_ratio ( example ):
    '''simple docstring'''
    input_ids = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    ratio = len(example["""content"""] ) / len(input_ids )
return {"ratio": ratio}
def preprocess ( example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
return results
def filter ( example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file ( file_path ):
    '''simple docstring'''
    with open(file_path , """rb""" ) as f_in:
        with gzip.open(str(file_path ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 0 |
'''simple docstring'''
from __future__ import annotations
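# Chinese remainder theorem for two congruences x = r1 (mod n1), x = r2 (mod n2) with coprime moduli,
# solved once via the extended Euclidean algorithm and once via modular inverses.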
def extended_euclid ( a , b ):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem ( n1 , r1 , n2 , r2 ):
    '''simple docstring'''
    (x, y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo ( a , n ):
    '''simple docstring'''
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2 ( n1 , r1 , n2 , r2 ):
    '''simple docstring'''
    x, y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
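    # Example (assumed values): chinese_remainder_theorem(5, 1, 7, 3) == 31, since 31 % 5 == 1 and 31 % 7 == 3.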
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True) | 718 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'{script_name}.py'
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir ) | 686 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
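# Configuration for MGP-STR (Multi-Granularity Prediction for Scene Text Recognition).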
class MgpstrConfig ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = image_size
A_ : Dict = patch_size
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = max_token_length
A_ : str = num_character_labels
A_ : Tuple = num_bpe_labels
A_ : Optional[Any] = num_wordpiece_labels
A_ : Any = hidden_size
A_ : Any = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = mlp_ratio
A_ : List[Any] = distilled
A_ : List[Any] = layer_norm_eps
A_ : int = drop_rate
A_ : Optional[int] = qkv_bias
A_ : Union[str, Any] = attn_drop_rate
A_ : Dict = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Dict = initializer_range | 719 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
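# k-nearest-neighbour classification: assign a point the majority class among its k closest training samples.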
def euclidean_distance ( a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier ( train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
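# Resonant frequency of an ideal LC circuit: f = 1 / (2 * pi * sqrt(L * C)).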
def resonant_frequency ( inductance , capacitance ):
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
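    # Example (assumed values): a 10 mH inductor with a 1 uF capacitor resonates near 1.59 kHz:
    # print(resonant_frequency(10e-3, 1e-6))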
doctest.testmod() | 720 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
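# Argument handler: normalizes candidate labels and builds (sequence, hypothesis) pairs for NLI-based zero-shot classification.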
class ZeroShotClassificationArgumentHandler ( ArgumentHandler ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
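# Zero-shot classification pipeline: scores each candidate label by how strongly an NLI model predicts
# that the input sequence entails the hypothesis built from that label.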
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline ( ChunkPipeline ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :Dict = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase :List[Any] = {
'''roberta-base''': 5_1_2,
'''roberta-large''': 5_1_2,
'''roberta-large-mnli''': 5_1_2,
'''distilroberta-base''': 5_1_2,
'''roberta-base-openai-detector''': 5_1_2,
'''roberta-large-openai-detector''': 5_1_2,
}
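# Fast (Rust-backed) byte-level BPE tokenizer for RoBERTa, mirroring the slow RobertaTokenizer.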
class RobertaTokenizerFast ( PreTrainedTokenizerFast ):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[Any] = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Tuple = RobertaTokenizer
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Tuple = getattr(lowercase , pre_tok_state.pop("""type""" ) )
A_ : str = add_prefix_space
A_ : Optional[Any] = pre_tok_class(**lowercase )
A_ : Union[str, Any] = add_prefix_space
A_ : Optional[Any] = """post_processor"""
A_ : List[str] = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Any = tuple(state["""sep"""] )
if "cls" in state:
A_ : Union[str, Any] = tuple(state["""cls"""] )
A_ : Tuple = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Optional[Any] = add_prefix_space
A_ : List[Any] = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Tuple = True
if changes_to_apply:
A_ : List[str] = getattr(lowercase , state.pop("""type""" ) )
A_ : Dict = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
def _a (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a (self , lowercase ):
A_ : Union[str, Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
A_ : Union[str, Any] = value
def _a (self , *lowercase , **lowercase ):
A_ : List[Any] = kwargs.get("""is_split_into_words""" , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
A_ : int = kwargs.get("""is_split_into_words""" , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : Optional[int] = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def _a (self , lowercase , lowercase=None ):
A_ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a (self , lowercase , lowercase = None ):
A_ : Dict = [self.sep_token_id]
A_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
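# Configuration for YOLOS (ViT-based object detection), including Hungarian-matcher costs and loss coefficients.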
class YolosConfig ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class YolosOnnxConfig ( OnnxConfig ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase :Optional[int] = trt.Logger(trt.Logger.WARNING)
lowerCamelCase :Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase :Tuple = logging.getLogger(__name__)
lowerCamelCase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_8_4,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_2_8,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=2_0,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=3_0,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
lowerCamelCase :Optional[Any] = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase :Dict = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
lowerCamelCase :Dict = args.per_device_eval_batch_size
lowerCamelCase :List[str] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase :Dict = True
lowerCamelCase :str = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
lowerCamelCase :str = '''temp_engine/bert-fp16.engine'''
if args.inta:
lowerCamelCase :int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
lowerCamelCase :List[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase :Dict = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase :List[str] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase :List[str] = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase :int = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase :Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
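# Copy the three input tensors to the GPU, launch the TensorRT engine asynchronously,
# copy the start/end logits back to host memory, and time the whole inference.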
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
A_ : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase__ )
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase__ ) for d_inp in d_inputs] + [int(lowerCamelCase__ ), int(lowerCamelCase__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : List[Any] = time.time()
A_ : Optional[Any] = end_time - start_time
A_ : Optional[Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase :Dict = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase :Dict = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase :Optional[int] = raw_datasets['''validation'''].column_names
lowerCamelCase :Tuple = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase :Any = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase :Optional[int] = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase :List[str] = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
lowerCamelCase :Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
A_ : Optional[Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase__ , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : str = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Any = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Optional[int] = tokenized_examples.sequence_ids(lowerCamelCase__ )
A_ : int = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Tuple = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set the offset_mapping entries that are not part of the context to None, so it is easy to determine
        # whether a token position is part of the context or not.
A_ : Union[str, Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
lowerCamelCase :Optional[int] = raw_datasets['''validation''']
# Validation Feature Creation
lowerCamelCase :str = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
lowerCamelCase :Optional[Any] = default_data_collator
lowerCamelCase :Union[str, Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase :Optional[int] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="eval" ):
'''simple docstring'''
A_ : Any = postprocess_qa_predictions(
examples=lowerCamelCase__ , features=lowerCamelCase__ , predictions=lowerCamelCase__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Any = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase__ , label_ids=lowerCamelCase__ )
lowerCamelCase :Optional[int] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def a ( lowerCamelCase__ ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(lowerCamelCase__ ) ) * engine.get_binding_dtype(lowerCamelCase__ ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase :List[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowerCamelCase :Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowerCamelCase :str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowerCamelCase :str = cuda.mem_alloc(h_outputa.nbytes)
lowerCamelCase :Optional[int] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase :Optional[Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
lowerCamelCase :Optional[Any] = 0.0
lowerCamelCase :int = 0
lowerCamelCase :str = timeit.default_timer()
lowerCamelCase :Optional[Any] = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
        start_logits, end_logits = outputs
lowerCamelCase :Optional[Any] = torch.tensor(start_logits)
lowerCamelCase :Optional[int] = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase :Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
lowerCamelCase :Optional[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
lowerCamelCase :Optional[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase :Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
lowerCamelCase :Dict = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase :Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0))
logger.info('''Total Number of Inference = %d''', niter)
lowerCamelCase :Union[str, Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase :Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}") | 700 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship between perplexity and word error rate has also been examined through a power-law theory that posits a correlation between the two.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
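

# Minimal usage sketch (not part of the original module): assumes the `jiwer`
# backend is installed and that the metric resolves through
# `datasets.load_metric`, as in the docstring example above.
if __name__ == "__main__":
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    wer_metric = datasets.load_metric("wer")
    # Iterative mode (default): error counts are accumulated pair by pair.
    print(wer_metric.compute(predictions=predictions, references=references))  # 0.5
    # Concatenated mode: a single jiwer call over all pairs; also 0.5 on this input.
    print(wer_metric.compute(predictions=predictions, references=references, concatenate_texts=True))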
'''simple docstring'''
from collections import deque
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : Dict = process_name # process name
A_ : Optional[int] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A_ : Tuple = arrival_time
A_ : Optional[Any] = burst_time # remaining burst time
A_ : Optional[Any] = 0 # total time of the process wait in ready queue
A_ : Any = 0 # time from arrival time to completion time
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase , lowercase , ):
# total number of mlfq's queues
A_ : Union[str, Any] = number_of_queues
# time slice of queues that round robin algorithm applied
A_ : Tuple = time_slices
# unfinished process is in this ready_queue
A_ : Union[str, Any] = queue
# current time
A_ : Any = current_time
# finished process is in this sequence queue
A_ : deque[Process] = deque()
def _a (self ):
A_ : Tuple = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _a (self , lowercase ):
A_ : Union[str, Any] = []
for i in range(len(lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _a (self , lowercase ):
A_ : str = []
for i in range(len(lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _a (self , lowercase ):
A_ : Any = []
for i in range(len(lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _a (self , lowercase ):
return [q.burst_time for q in queue]
def _a (self , lowercase ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _a (self , lowercase ):
A_ : deque[Process] = deque() # sequence deque of finished process
while len(lowercase ) != 0:
A_ : Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A_ : int = 0
# set the process's turnaround time because it is finished
A_ : str = self.current_time - cp.arrival_time
# set the completion time
A_ : Union[str, Any] = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase )
self.finish_queue.extend(lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _a (self , lowercase , lowercase ):
A_ : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowercase ) ):
A_ : List[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A_ : str = self.current_time
                # put the process back at the end of the queue because it is not finished
ready_queue.append(lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A_ : int = 0
# set the finish time
A_ : Tuple = self.current_time
# update the process' turnaround time because it is finished
A_ : Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase )
self.finish_queue.extend(lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _a (self ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
A_ : List[str] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
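
# Net effect of the scheduler above: queue level i (for i < number_of_queues - 1)
# runs one round-robin pass with slice time_slices[i]; processes that still have
# burst time left fall through to the next level, and the last level is drained
# first-come, first-served.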
if __name__ == "__main__":
import doctest
lowerCamelCase :Tuple = Process('''P1''', 0, 5_3)
lowerCamelCase :int = Process('''P2''', 0, 1_7)
lowerCamelCase :Optional[int] = Process('''P3''', 0, 6_8)
lowerCamelCase :Union[str, Any] = Process('''P4''', 0, 2_4)
lowerCamelCase :Any = 3
lowerCamelCase :Optional[int] = [1_7, 2_5]
lowerCamelCase :Optional[int] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
lowerCamelCase :int = Process('''P1''', 0, 5_3)
lowerCamelCase :str = Process('''P2''', 0, 1_7)
lowerCamelCase :str = Process('''P3''', 0, 6_8)
lowerCamelCase :List[str] = Process('''P4''', 0, 2_4)
lowerCamelCase :Optional[Any] = 3
lowerCamelCase :Union[str, Any] = [1_7, 2_5]
lowerCamelCase :int = deque([Pa, Pa, Pa, Pa])
lowerCamelCase :Dict = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCamelCase :Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = [0] * len(lowerCamelCase__ )
A_ : Optional[int] = []
A_ : Any = []
A_ : List[str] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase__ ) ):
if indegree[i] == 0:
queue.append(lowerCamelCase__ )
while queue:
A_ : str = queue.pop(0 )
cnt += 1
topo.append(lowerCamelCase__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase__ )
if cnt != len(lowerCamelCase__ ):
print("""Cycle exists""" )
else:
print(lowerCamelCase__ )
# Adjacency List of Graph
lowerCamelCase :Tuple = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
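
# For the adjacency list above there is no cycle, and the computed topological
# order is [0, 1, 2, 3, 4, 5]: vertex 0 starts with indegree 0, removing it
# frees 1 and 2, then 3, and finally 4 and 5.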
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _lowerCAmelCase :
def _a (self , lowercase , lowercase , lowercase ):
return None
class _lowerCAmelCase :
def _a (self , lowercase , lowercase , lowercase , lowercase ):
return None
class _lowerCAmelCase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _a (self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase , """tf""" , 12 , **lowercase )
@require_torch
@slow
def _a (self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase , """pt""" , 12 , **lowercase )
@require_torch
@slow
def _a (self ):
from transformers import BertModel
A_ : Optional[Any] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(lowercase ) )
vocab_file.flush()
A_ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A_ : Optional[int] = BertModel(BertConfig(vocab_size=len(lowercase ) ) )
model.save_pretrained(lowercase )
self._test_export(lowercase , """pt""" , 12 , lowercase )
@require_tf
@slow
def _a (self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ : List[Any] = self._test_export(lowercase , """tf""" , 12 , **lowercase )
A_ : Union[str, Any] = quantize(Path(lowercase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def _a (self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ : Any = self._test_export(lowercase , """pt""" , 12 , **lowercase )
A_ : Any = quantize(lowercase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def _a (self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
A_ : Any = Path(lowercase ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
return path
except Exception as e:
self.fail(lowercase )
@require_torch
@require_tokenizers
@slow
def _a (self ):
from transformers import BertModel
A_ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A_ : Optional[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase , lowercase , """pt""" )
@require_tf
@require_tokenizers
@slow
def _a (self ):
from transformers import TFBertModel
A_ : Dict = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A_ : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase , lowercase , """tf""" )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Union[str, Any] = FeatureExtractionPipeline(lowercase , lowercase )
A_ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
A_ : Optional[Any] = infer_shapes(lowercase , lowercase )
# Assert all variables are present
self.assertEqual(len(lowercase ) , len(lowercase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowercase )
self.assertSequenceEqual(variable_names[3:] , lowercase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def _a (self ):
A_ : Dict = ["""input_ids""", """attention_mask""", """token_type_ids"""]
A_ : List[str] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
A_ : List[str] = ensure_valid_input(FuncContiguousArgs() , lowercase , lowercase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase ) , set(lowercase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A_ : str = ensure_valid_input(FuncNonContiguousArgs() , lowercase , lowercase )
        # Should have exactly one arg (everything before the argument that was not provided, "some_other_args")
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(len(lowercase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def _a (self ):
A_ : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() ) | 703 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
A_ : Any = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
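
# A tiny hand-checkable case (added for illustration): the only way to build
# "aa" from ["a"] is two copies of "a".
print(all_construct('''aa''', ['''a''']))  # [['a', 'a']]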
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
A_ : Union[str, Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A_ : List[Any] = 1
if upper_limit > 0:
A_ : Optional[Any] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCamelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
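
# Sanity check for the recurrence above: the first six Catalan numbers are
# 1, 1, 2, 5, 14, 42, so catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].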
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
lowerCamelCase :str = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = []
A_ : int = set({"""(""", """[""", """{"""} )
A_ : Union[str, Any] = set({""")""", """]""", """}"""} )
A_ : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowerCamelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCamelCase__ ) == 0 or (len(lowerCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase__ ) == 0
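
# Hand-checkable examples against the stack logic above:
#   is_balanced("([]{})") -> True
#   is_balanced("(]") -> False (']' arrives while '(' is on top of the stack)
#   is_balanced("") -> True (nothing to match)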
def a ( ):
'''simple docstring'''
A_ : int = input("""Enter sequence of brackets: """ )
if is_balanced(lowerCamelCase__ ):
print(lowerCamelCase__ , """is balanced""" )
else:
print(lowerCamelCase__ , """is not balanced""" )
if __name__ == "__main__":
    main()
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowerCamelCase :Any = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
lowerCamelCase :Any = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
lowerCamelCase :Optional[int] = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def _a (self , lowercase , lowercase ):
A_ : str = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : str = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : Optional[Any] = evaluate(dataset=lowercase , predictions=lowercase )
        return score
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Optional[int] = field
A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : Optional[Any] = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : int = None
A_ : Union[str, Any] = None
A_ : int = None
A_ : List[str] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : str = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
def _a (self ):
A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : List[str] = args
A_ : List[str] = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Any = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
        return written
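

# Usage sketch (hypothetical names: upstream `datasets` exposes these classes
# as JsonDatasetReader / JsonDatasetWriter, while this module binds both to the
# same obfuscated name, so the upstream spelling is used here for clarity):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col": [1, 2, 3]})
#     JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()
#
# write() slices the Arrow table into `batch_size` chunks, serializes each
# chunk with pandas' to_json(orient="records", lines=True), and, when
# `num_proc` > 1, fans the chunks out over a multiprocessing pool before
# appending them to the target file.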
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase :Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
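
# Example of how the wildcard entries above are resolved by the loader below:
# a fairseq tensor named "encoder.layers.3.self_attn.linear_q.weight" matches
# the "self_attn.linear_q" key, the layer index "3" is spliced into the "*"
# slot, and the value is routed to
# "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q" with weight_type
# "weight".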
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[Any] = value
elif weight_type == "weight_g":
A_ : Optional[Any] = value
elif weight_type == "weight_v":
A_ : List[str] = value
elif weight_type == "bias":
A_ : List[str] = value
elif weight_type == "running_mean":
A_ : Union[str, Any] = value
elif weight_type == "running_var":
A_ : Any = value
elif weight_type == "num_batches_tracked":
A_ : List[Any] = value
elif weight_type == "inv_freq":
A_ : Dict = value
else:
A_ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
A_ : Union[str, Any] = fairseq_model.state_dict()
A_ : str = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
A_ : int = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
A_ : int = True
else:
for key, mapped_key in MAPPING.items():
A_ : Dict = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A_ : List[str] = True
if "*" in mapped_key:
A_ : Any = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Any = mapped_key.replace("""*""" , lowerCamelCase__ )
if "pos_bias_u" in name:
A_ : List[str] = None
elif "pos_bias_v" in name:
A_ : Optional[int] = None
elif "weight_g" in name:
A_ : Union[str, Any] = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "bias" in name:
A_ : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : List[Any] = """running_mean"""
elif "inv_freq" in name:
A_ : str = """inv_freq"""
elif "running_var" in name:
A_ : str = """running_var"""
elif "num_batches_tracked" in name:
A_ : Union[str, Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = full_name.split("""conv_layers.""" )[-1]
A_ : Dict = name.split(""".""" )
A_ : int = int(items[0] )
A_ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
A_ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
A_ : List[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
A_ : Tuple = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
A_ : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
'''simple docstring'''
if config_path is not None:
A_ : Any = WavaVecaConformerConfig.from_pretrained(lowerCamelCase__ , hidden_act="""swish""" )
else:
A_ : Union[str, Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
A_ : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
A_ : Optional[int] = Dictionary.load(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A_ : Optional[Any] = target_dict.pad_index
A_ : List[Any] = target_dict.bos_index
A_ : List[Any] = target_dict.eos_index
A_ : Optional[Any] = len(target_dict.symbols )
A_ : int = os.path.join(lowerCamelCase__ , """vocab.json""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
A_ : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
A_ : Tuple = 0
A_ : str = 1
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
A_ : Any = WavaVecaCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
A_ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
A_ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
A_ : List[str] = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
A_ : int = WavaVecaConformerForCTC(lowerCamelCase__ )
else:
A_ : str = WavaVecaConformerForPreTraining(lowerCamelCase__ )
if is_finetuned:
A_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
A_ : Dict = argparse.Namespace(task="""audio_pretraining""" )
A_ : Optional[int] = fairseq.tasks.setup_task(lowerCamelCase__ )
A_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase__ )
A_ : Dict = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 706 |
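
# --- Editorial sketch (not part of the conversion script above). A minimal,
# self-contained illustration of the getattr-chain pattern that set_recursively
# relies on: walk a dotted key such as "encoder.layers.0.conv" down the module
# tree, check shapes, then copy the converted tensor in place. The helper name
# is hypothetical.
import torch
from torch import nn

def assign_by_dotted_key(model: nn.Module, key: str, value: torch.Tensor) -> None:
    """Follow each attribute in `key` and copy `value` into the final tensor."""
    *parents, leaf = key.split(".")
    obj = model
    for attr in parents:
        obj = getattr(obj, attr)  # nn.ModuleList also resolves string indices "0", "1", ...
    target = getattr(obj, leaf)
    if target.shape != value.shape:
        raise ValueError(f"Shape mismatch for {key}: {tuple(target.shape)} vs {tuple(value.shape)}")
    with torch.no_grad():
        target.copy_(value)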
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 | 0 |
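
# --- Editorial sketch (hypothetical helper, not the real get_test_info API).
# One way a model -> test-class mapping like the ones asserted above can be
# derived: scan a test module for unittest.TestCase subclasses and read their
# `all_model_classes` attribute (assumed to be exposed by the common tester mixin).
import inspect
import unittest

def build_model_to_test_mapping(test_module) -> dict:
    mapping = {}
    for _, cls in inspect.getmembers(test_module, inspect.isclass):
        if not issubclass(cls, unittest.TestCase):
            continue
        for model_cls in getattr(cls, "all_model_classes", ()):
            mapping.setdefault(model_cls.__name__, []).append(cls.__name__)
    return {name: sorted(tests) for name, tests in sorted(mapping.items())}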
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCamelCase :int = TypeVar('''KT''')
lowerCamelCase :Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
def __init__(self , lowercase = "root" , lowercase = None ):
A_ : Union[str, Any] = key
A_ : int = value
A_ : list[Node[KT, VT]] = []
def __repr__(self ):
return F'Node({self.key}: {self.value})'
@property
def _a (self ):
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
def __init__(self , lowercase = 0.5 , lowercase = 16 ):
A_ : Node[KT, VT] = Node[KT, VT]()
A_ : Tuple = 0
A_ : Tuple = p
A_ : int = max_level
def __str__(self ):
A_ : Union[str, Any] = list(self )
if len(lowercase ) == 0:
return F'SkipList(level={self.level})'
A_ : Dict = max((len(str(lowercase ) ) for item in items) , default=4 )
A_ : str = max(lowercase , 4 ) + 4
A_ : Optional[int] = self.head
A_ : Dict = []
A_ : Optional[Any] = node.forward.copy()
lines.append(F'[{node.key}]'.ljust(lowercase , """-""" ) + """* """ * len(lowercase ) )
lines.append(""" """ * label_size + """| """ * len(lowercase ) )
while len(node.forward ) != 0:
A_ : Optional[Any] = node.forward[0]
lines.append(
F'[{node.key}]'.ljust(lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(lowercase ) )
A_ : Dict = node.forward
lines.append("""None""".ljust(lowercase ) + """* """ * len(lowercase ) )
return F'SkipList(level={self.level})\n' + "\n".join(lowercase )
def __iter__(self ):
A_ : Optional[int] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
A_ : str = node.forward[0]
def _a (self ):
A_ : Dict = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _a (self , lowercase ):
A_ : Union[str, Any] = []
A_ : List[Any] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
A_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _a (self , lowercase ):
A_ : List[Any] = self._locate_node(lowercase )
if node is not None:
for i, update_node in enumerate(lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
A_ : List[Any] = node.forward[i]
else:
A_ : List[str] = update_node.forward[:i]
def _a (self , lowercase , lowercase ):
A_ : Union[str, Any] = self._locate_node(lowercase )
if node is not None:
A_ : Any = value
else:
A_ : List[str] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , lowercase ):
update_vector.append(self.head )
A_ : List[str] = level
A_ : List[str] = Node(lowercase , lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowercase )
else:
A_ : str = new_node
def _a (self , lowercase ):
A_ : List[str] = self._locate_node(lowercase )
if node is not None:
return node.value
return None
def a ( ):
'''simple docstring'''
A_ : int = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
A_ : List[Any] = skip_list.head
A_ : List[str] = {}
while node.level != 0:
A_ : Tuple = node.forward[0]
A_ : List[Any] = node.value
assert len(lowerCamelCase__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def a ( ):
'''simple docstring'''
A_ : int = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
A_ : Dict = skip_list.head
A_ : Tuple = {}
while node.level != 0:
A_ : Tuple = node.forward[0]
A_ : Optional[Any] = node.value
if len(lowerCamelCase__ ) != 4:
print()
assert len(lowerCamelCase__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def a ( ):
'''simple docstring'''
A_ : Any = SkipList()
assert skip_list.find("""Some key""" ) is None
def a ( ):
'''simple docstring'''
A_ : Tuple = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def a ( ):
'''simple docstring'''
A_ : Optional[int] = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def a ( ):
'''simple docstring'''
A_ : List[Any] = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def a ( ):
'''simple docstring'''
A_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def a ( ):
'''simple docstring'''
A_ : Any = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 1_42 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
def traverse_keys(lowerCamelCase__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(lowerCamelCase__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def a ( ):
'''simple docstring'''
def is_sorted(lowerCamelCase__ ):
return all(next_item >= item for item, next_item in zip(lowerCamelCase__ , lst[1:] ) )
A_ : List[str] = SkipList()
for i in range(10 ):
skip_list.insert(lowerCamelCase__ , lowerCamelCase__ )
assert is_sorted(list(lowerCamelCase__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(lowerCamelCase__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(lowerCamelCase__ ) )
def a ( ):
'''simple docstring'''
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def a ( ):
'''simple docstring'''
A_ : List[Any] = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 707 |
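
# --- Editorial note on the SkipList above. With promotion probability p = 0.5,
# search drops from the highest level down, skipping about half the remaining
# nodes per level, for expected O(log n) find/insert/delete versus O(n) in a
# plain sorted linked list. Minimal usage, matching the class API above:
#
#     sl = SkipList()          # SkipList[str, int]
#     sl.insert("alpha", 1)
#     sl.insert("beta", 2)
#     assert sl.find("alpha") == 1
#     sl.delete("alpha")
#     assert sl.find("alpha") is None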
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Any = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
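
# --- Editorial sketch (simplified, hypothetical names). The _LazyModule built
# above defers the heavy torch/flax imports until an attribute is first
# accessed; PEP 562's module-level __getattr__ gives the same effect in plain
# Python for a package __init__:
import importlib

_LAZY_ATTRS = {"LongT5Config": ".configuration_longt5"}  # attribute -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")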
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase :int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = XLNetTokenizer
__SCREAMING_SNAKE_CASE : Tuple = XLNetTokenizerFast
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : List[str] = True
def _a (self ):
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Optional[Any] = XLNetTokenizer(lowercase , keep_accents=lowercase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
A_ : Union[str, Any] = """<s>"""
A_ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def _a (self ):
A_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(lowercase ) , 1006 )
def _a (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a (self ):
A_ : Optional[Any] = XLNetTokenizer(lowercase , keep_accents=lowercase )
A_ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [285, 46, 10, 170, 382] )
A_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A_ : Any = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
A_ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _a (self ):
A_ : List[str] = XLNetTokenizer(lowercase , do_lower_case=lowercase )
A_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def _a (self ):
A_ : Optional[int] = XLNetTokenizer(lowercase , do_lower_case=lowercase )
A_ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def _a (self ):
A_ : List[Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
A_ : Union[str, Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase )
A_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _a (self ):
# fmt: off
A_ : Any = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , ) | 708 |
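
# --- Editorial usage sketch (hedged): the round-trip these tests exercise, in
# miniature. `sp_model_path` is a stand-in for the SentencePiece fixture path
# assigned above; the token and id values are copied from the test assertions.
#
#     tok = XLNetTokenizer(sp_model_path, keep_accents=True)
#     tokens = tok.tokenize("This is a test")   # ['▁This', '▁is', '▁a', '▁t', 'est']
#     ids = tok.convert_tokens_to_ids(tokens)   # [285, 46, 10, 170, 382]
#     assert tok.convert_ids_to_tokens(ids) == tokens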
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase :Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def a ( lowerCamelCase__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCamelCase :List[Any] = parser.parse_args()
if args.check_lib:
lowerCamelCase :Union[str, Any] = importlib.import_module('''transformers''')
lowerCamelCase :Union[str, Any] = Path(transformers_module.__file__).parent
else:
lowerCamelCase :List[str] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 0 |
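
# --- Editorial usage note for the check script above (assuming it is saved as,
# e.g., utils/check_build.py):
#
#     python utils/check_build.py              # inspect build/lib/transformers
#     python utils/check_build.py --check_lib  # inspect the installed package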
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
debug_launcher(test_script.main )
def _a (self ):
debug_launcher(test_ops.main ) | 709 |
'''simple docstring'''
lowerCamelCase :dict[tuple[int, int, int], int] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : int = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : Optional[int] = _calculate(days - 1 , lowerCamelCase__ , 0 )
A_ : Optional[Any] = state_late + state_absent + state_ontime
A_ : Dict = prizestrings
return prizestrings
def a ( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution()) | 686 | 0 |
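
# --- Editorial sketch: the same recurrence as _calculate above, memoized with
# functools.lru_cache instead of a hand-rolled dict cache (hypothetical helper
# name; the results are identical).
from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

assert prize_strings(4) == 43  # the 4-day case from Project Euler 191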
'''simple docstring'''
from __future__ import annotations
import numpy as np
def a ( lowerCamelCase__ ):
'''simple docstring'''
return np.maximum(0 , lowerCamelCase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5] | 710 |
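
# --- Editorial companion (not in the original): the subgradient of the ReLU
# above, useful when backpropagating through np.maximum(0, x). The value at
# x == 0 is a convention (0 here). Reuses the numpy import from the snippet above.
def relu_derivative(vector: np.ndarray) -> np.ndarray:
    return (vector > 0).astype(vector.dtype)

# relu_derivative(np.array([-1.0, 0.0, 5.0]))  # --> [0., 0., 1.]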
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( Enum ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : Union[str, Any] = rule_str.split(""":""" )
A_ : Union[str, Any] = int(lowerCamelCase__ )
A_ : List[Any] = float(lowerCamelCase__ )
A_ : Union[str, Any] = value
A_ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ):
'''simple docstring'''
A_ : Optional[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : str = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase :List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ):
'''simple docstring'''
A_ : Optional[Any] = SchedulerType(lowerCamelCase__ )
A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) | 686 | 0 |
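
# --- Editorial usage sketch (hypothetical names): every scheduler above is a
# LambdaLR factory, i.e. a per-step multiplier on the optimizer's base lr.
# Wiring one multiplier by hand shows the warmup-then-linear-decay shape:
import torch
from torch.optim.lr_scheduler import LambdaLR

_model = torch.nn.Linear(4, 2)
_optimizer = torch.optim.AdamW(_model.parameters(), lr=1e-3)

def _warmup_then_linear(step: int, warmup: int = 10, total: int = 100) -> float:
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))

_scheduler = LambdaLR(_optimizer, _warmup_then_linear)
for _ in range(3):
    _optimizer.step()
    _scheduler.step()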
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = []
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase__ ) )
elif isinstance(lowerCamelCase__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase__ ) )
elif isinstance(lowerCamelCase__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = []
for d in reversed(lowerCamelCase__ ):
idx.append(flat_idx % d )
A_ : Dict = flat_idx // d
return tuple(reversed(lowerCamelCase__ ) )
@torch.jit.ignore
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
'''simple docstring'''
def reduce_edge_list(lowerCamelCase__ ) -> None:
A_ : str = True
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
A_ : Any = l[reversed_idx]
if start_edges is None:
A_ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase__ )
if end_edges is None:
A_ : Dict = [e == (d - 1) for e, d in zip(lowerCamelCase__ , lowerCamelCase__ )]
reduce_edge_list(lowerCamelCase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase__ ) == 0:
return [()]
elif len(lowerCamelCase__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
A_ : List[Tuple[slice, ...]] = []
A_ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase__ , lowerCamelCase__ ):
if s == e:
path_list.append(slice(lowerCamelCase__ , s + 1 ) )
else:
break
A_ : Tuple[slice, ...] = tuple(lowerCamelCase__ )
A_ : int = len(lowerCamelCase__ )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ : Union[str, Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ : Union[str, Any] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
A_ : List[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = t.shape[:no_batch_dims]
A_ : str = list(_flat_idx_to_idx(lowerCamelCase__ , lowerCamelCase__ ) )
# _get_minimal_slice_set is inclusive
A_ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase__ ) )
# Get an ordered list of slices to perform
A_ : Tuple = _get_minimal_slice_set(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
A_ : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = False , ):
'''simple docstring'''
if not (len(lowerCamelCase__ ) > 0):
raise ValueError("""Must provide at least one input""" )
A_ : Optional[int] = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase__ )]
A_ : List[Any] = tuple([max(lowerCamelCase__ ) for s in zip(*lowerCamelCase__ )] )
def _prep_inputs(lowerCamelCase__ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
A_ : List[str] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
A_ : Tuple = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
A_ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
A_ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase__ )
A_ : Tuple = None
if _out is not None:
A_ : Dict = tensor_tree_map(lambda lowerCamelCase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
A_ : Any = 1
for d in orig_batch_dims:
flat_batch_dim *= d
A_ : Any = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase__ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
A_ : Dict = 0
A_ : Dict = prepped_outputs
for _ in range(lowerCamelCase__ ):
# Chunk the input
if not low_mem:
A_ : Optional[int] = _select_chunk
else:
A_ : Any = partial(
_chunk_slice , flat_start=lowerCamelCase__ , flat_end=min(lowerCamelCase__ , i + chunk_size ) , no_batch_dims=len(lowerCamelCase__ ) , )
A_ : Dict[str, Any] = tensor_tree_map(lowerCamelCase__ , lowerCamelCase__ )
# Run the layer on the chunk
A_ : Tuple = layer(**lowerCamelCase__ )
# Allocate space for the output
if out is None:
A_ : List[Any] = tensor_tree_map(lambda lowerCamelCase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase__ )
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
def assign(lowerCamelCase__ , lowerCamelCase__ ) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
assign(lowerCamelCase__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
A_ : Union[str, Any] = da[k]
assign(lowerCamelCase__ , lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
for xa, xa in zip(lowerCamelCase__ , lowerCamelCase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
A_ : Optional[int] = xa
elif isinstance(lowerCamelCase__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
A_ : int = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
A_ : Any = tensor_tree_map(lambda lowerCamelCase__ : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase__ )
return out
class _lowerCAmelCase :
def __init__(self , lowercase = 512 , ):
A_ : Any = max_chunk_size
A_ : Optional[int] = None
A_ : Optional[tuple] = None
def _a (self , lowercase , lowercase , lowercase ):
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
A_ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
A_ : int = [c for c in candidates if c > min_chunk_size]
A_ : List[str] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(lowercase ) -> bool:
try:
with torch.no_grad():
fn(*lowercase , chunk_size=lowercase )
return True
except RuntimeError:
return False
A_ : Union[str, Any] = 0
A_ : Optional[int] = len(lowercase ) - 1
while i > min_viable_chunk_size_index:
A_ : Optional[Any] = test_chunk_size(candidates[i] )
if not viable:
A_ : Optional[int] = (min_viable_chunk_size_index + i) // 2
else:
A_ : int = i
A_ : Tuple = (i + len(lowercase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _a (self , lowercase , lowercase ):
A_ : Any = True
for aa, aa in zip(lowercase , lowercase ):
assert type(lowercase ) == type(lowercase )
if isinstance(lowercase , (list, tuple) ):
consistent &= self._compare_arg_caches(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
A_ : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda lowercase : x[0] )]
A_ : Optional[int] = [v for _, v in sorted(aa.items() , key=lambda lowercase : x[0] )]
consistent &= self._compare_arg_caches(lowercase , lowercase )
else:
consistent &= aa == aa
return consistent
def _a (self , lowercase , lowercase , lowercase , ):
A_ : Union[str, Any] = True
A_ : tuple = tree_map(lambda lowercase : a.shape if isinstance(lowercase , torch.Tensor ) else a , lowercase , lowercase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(lowercase )
A_ : int = self._compare_arg_caches(self.cached_arg_data , lowercase )
else:
# Otherwise, we can reuse the precomputed value
A_ : Dict = False
if not consistent:
A_ : Optional[int] = self._determine_favorable_chunk_size(
lowercase , lowercase , lowercase , )
A_ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size | 711 |
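
# --- Editorial sketch (hypothetical helper): the core idea the chunk utilities
# above generalize to arbitrary pytrees -- flatten the batch dims, run the layer
# over fixed-size slices, and concatenate, trading peak memory for extra calls.
import torch

def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    flat = x.reshape(-1, x.shape[-1])  # collapse all batch dims
    pieces = [fn(flat[i : i + chunk_size]) for i in range(0, flat.shape[0], chunk_size)]
    out = torch.cat(pieces, dim=0)
    return out.reshape(*x.shape[:-1], out.shape[-1])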
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
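    # End-to-end conversion: build the config for the requested variant, instantiate
    # EncodecModel and EncodecFeatureExtractor, load the original checkpoint (using its
    # "best_state" when a full training state was saved), remap the weights, save the
    # result, and optionally push both artifacts to the Hub.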
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 0 |
'''simple docstring'''
def solution():
'''simple docstring'''
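    # Project Euler 9: the special Pythagorean triplet with a + b + c = 1000 and
    # a**2 + b**2 = c**2; the answer is the product a * b * c (31875000).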
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"{solution() = }") | 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
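    # BEiT configuration; the defaults below match the base architecture (768 hidden,
    # 12 layers/heads) and include extra fields for the UPerNet-style decode and
    # auxiliary semantic-segmentation heads.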
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 0 |
'''simple docstring'''
from __future__ import annotations
class _lowerCAmelCase :
def __init__(self , lowercase ):
A_ : Optional[Any] = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(lowercase ) != 0:
A_ : List[Any] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowercase ) != cols:
raise error
for value in row:
if not isinstance(lowercase , (int, float) ):
raise error
A_ : str = rows
else:
A_ : Optional[int] = []
def _a (self ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _a (self ):
return len(self.rows )
@property
def _a (self ):
return len(self.rows[0] )
@property
def _a (self ):
return (self.num_rows, self.num_columns)
@property
def _a (self ):
return self.order[0] == self.order[1]
def _a (self ):
A_ : str = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowercase )
def _a (self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _a (self ):
return bool(self.determinant() )
def _a (self , lowercase , lowercase ):
A_ : Optional[int] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowercase ).determinant()
def _a (self , lowercase , lowercase ):
if (row + column) % 2 == 0:
return self.get_minor(lowercase , lowercase )
return -1 * self.get_minor(lowercase , lowercase )
def _a (self ):
return Matrix(
[
[self.get_minor(lowercase , lowercase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _a (self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _a (self ):
A_ : Optional[int] = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowercase )
def _a (self ):
A_ : str = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__(self ):
return str(self.rows )
def __str__(self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(lowercase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def _a (self , lowercase , lowercase = None ):
A_ : Optional[Any] = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(lowercase , lowercase ):
raise type_error
for value in row:
if not isinstance(lowercase , (int, float) ):
raise type_error
if len(lowercase ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(lowercase )
else:
A_ : Any = self.rows[0:position] + [row] + self.rows[position:]
def _a (self , lowercase , lowercase = None ):
A_ : List[str] = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(lowercase , lowercase ):
raise type_error
for value in column:
if not isinstance(lowercase , (int, float) ):
raise type_error
if len(lowercase ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
A_ : Any = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
A_ : Optional[int] = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__(self , lowercase ):
if not isinstance(lowercase , lowercase ):
return NotImplemented
return self.rows == other.rows
def __ne__(self , lowercase ):
return not self == other
def __neg__(self ):
return self * -1
def __add__(self , lowercase ):
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__(self , lowercase ):
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__(self , lowercase ):
if isinstance(lowercase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowercase , lowercase ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(lowercase , lowercase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__(self , lowercase ):
if not isinstance(lowercase , lowercase ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
A_ : List[str] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _a (cls , lowercase , lowercase ):
return sum(row[i] * column[i] for i in range(len(lowercase ) ) )
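# Example (not from the original module): quick sanity checks of the API above.
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()    # -2
#     m.is_invertable()  # True
#     (m + m).rows       # [[2, 4], [6, 8]]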
if __name__ == "__main__":
import doctest
doctest.testmod() | 713 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
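    # Map an (alpha, sigma) pair on the unit quarter-circle to a diffusion timestep in [0, 1].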
    return torch.atan2(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
        A_ : int = DiffusionAttnUnet1D(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
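    # Translate an original audio-diffusion parameter name into the diffusers UNet
    # naming scheme, using the nesting depth of the "net."/"main.7." prefixes to decide
    # between down, mid, and up blocks.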
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
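    # Attention weights: 1x1 convolutions become Linear layers (drop the trailing conv
    # axis), and a fused qkv projection is split into three equally sized matrices.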
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
    A_ : List[Any] = UNet1DModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase :Any = logging.getLogger()
def a ( lowerCamelCase__ ):
'''simple docstring'''
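    # Read the metrics the trainer wrote to all_results.json in the given output dir.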
A_ : Union[str, Any] = {}
A_ : str = os.path.join(lowerCamelCase__ , """all_results.json""" )
if os.path.exists(lowerCamelCase__ ):
with open(lowerCamelCase__ , """r""" ) as f:
A_ : Any = json.load(lowerCamelCase__ )
else:
raise ValueError(f'can\'t find {path}' )
return results
lowerCamelCase :Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self ):
import xla_spawn
A_ : int = self.get_auto_remove_tmp_dir()
A_ : Optional[Any] = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowercase , """argv""" , lowercase ):
A_ : str = time()
xla_spawn.main()
A_ : Optional[Any] = time()
A_ : List[Any] = get_results(lowercase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def _a (self ):
import xla_spawn
A_ : Union[str, Any] = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(lowercase , """argv""" , lowercase ):
xla_spawn.main() | 714 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes , trials , prob ):
'''simple docstring'''
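    # Binomial PMF: P(X = k) = C(n, k) * p**k * (1 - p)**(n - k), with k = successes,
    # n = trials and p = prob. E.g. P(2; 4, 0.75) = 6 * 0.75**2 * 0.25**2 = 0.2109375.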
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
def count_divisors(n ):
'''simple docstring'''
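    # Trial division: the number of divisors of n is the product of (exponent + 1)
    # over its prime factorization.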
    n_divisors = 1
    i = 2
while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def solution():
'''simple docstring'''
    i = 1
    t_num = 1
while True:
i += 1
t_num += i
        if count_divisors(t_num ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution()) | 715 |
'''simple docstring'''
import re
def dna_complement(dna ):
'''simple docstring'''
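    # Reject strands with characters outside A/T/C/G, then return the complement (A<->T, C<->G).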
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
lowerCamelCase :List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase :Dict = {
'''bert-base-uncased''': 5_1_2,
'''bert-large-uncased''': 5_1_2,
'''bert-base-cased''': 5_1_2,
'''bert-large-cased''': 5_1_2,
'''bert-base-multilingual-uncased''': 5_1_2,
'''bert-base-multilingual-cased''': 5_1_2,
'''bert-base-chinese''': 5_1_2,
'''bert-base-german-cased''': 5_1_2,
'''bert-large-uncased-whole-word-masking''': 5_1_2,
'''bert-large-cased-whole-word-masking''': 5_1_2,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_1_2,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_1_2,
'''bert-base-cased-finetuned-mrpc''': 5_1_2,
'''bert-base-german-dbmdz-cased''': 5_1_2,
'''bert-base-german-dbmdz-uncased''': 5_1_2,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_1_2,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_1_2,
'''wietsedv/bert-base-dutch-cased''': 5_1_2,
}
lowerCamelCase :Tuple = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer
def __init__(self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ):
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
A_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
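        # If the serialized normalizer disagrees with the requested init kwargs,
        # rebuild it with the new lowercasing / accent / Chinese-character settings.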
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
A_ : Any = getattr(lowercase , normalizer_state.pop("""type""" ) )
A_ : int = do_lower_case
A_ : List[Any] = strip_accents
A_ : Optional[int] = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**lowercase )
A_ : Union[str, Any] = do_lower_case
def _a (self , lowercase , lowercase=None ):
A_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a (self , lowercase , lowercase = None ):
A_ : List[str] = [self.sep_token_id]
A_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a (self , lowercase , lowercase = None ):
A_ : Any = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase ) | 716 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" ) | 686 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=32 , lowercase=3 , lowercase=4 , lowercase=[10, 20, 30, 40] , lowercase=[2, 2, 3, 2] , lowercase=True , lowercase=True , lowercase=37 , lowercase="gelu" , lowercase=10 , lowercase=0.02 , lowercase=["stage2", "stage3", "stage4"] , lowercase=3 , lowercase=None , ):
A_ : int = parent
A_ : str = batch_size
A_ : Union[str, Any] = image_size
A_ : str = num_channels
A_ : str = num_stages
A_ : Optional[int] = hidden_sizes
A_ : int = depths
A_ : Any = is_training
A_ : List[Any] = use_labels
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : int = type_sequence_label_size
A_ : int = initializer_range
A_ : List[Any] = out_features
A_ : Optional[int] = num_labels
A_ : Optional[Any] = scope
A_ : List[str] = num_stages
def _a (self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _a (self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowercase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=lowercase , loss_ignore_index=255 , num_labels=self.num_labels , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = UperNetForSemanticSegmentation(config=lowercase )
model.to(lowercase )
model.eval()
A_ : int = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _a (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[str] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Tuple = False
def _a (self ):
A_ : Optional[int] = UperNetModelTester(self )
A_ : str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a (self ):
return
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : str = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _a (self ):
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _a (self ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a (self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : str = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[str] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Union[str, Any] = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = _config_zero_init(lowercase )
A_ : Any = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
A_ : str = model_class(config=lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _a (self ):
pass
@slow
def _a (self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a ( ):
'''simple docstring'''
A_ : Any = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
A_ : int = Image.open(lowerCamelCase__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : int = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
A_ : Dict = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(lowercase )
A_ : Tuple = prepare_img()
A_ : Optional[Any] = processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
with torch.no_grad():
A_ : int = model(**lowercase )
A_ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : str = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase , atol=1E-4 ) )
def _a (self ):
A_ : Tuple = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
A_ : Dict = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(lowercase )
A_ : Any = prepare_img()
A_ : str = processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
with torch.no_grad():
A_ : Optional[Any] = model(**lowercase )
A_ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : List[Any] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase , atol=1E-4 ) ) | 717 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
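    # MD5 of the whitespace-stripped file content; used below for exact deduplication.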
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
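    # Keep an example only if its hash is unique and it passes every heuristic below:
    # not autogenerated, line lengths and alphanumeric fraction within bounds, a high
    # enough char/token ratio, and the probabilistic config/test and keyword filters.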
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : List[str] = 'BlipImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowercase , lowercase ):
A_ : Any = False
super().__init__(lowercase , lowercase )
A_ : Optional[Any] = self.image_processor
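    # __call__ routes text-only input straight to the tokenizer; otherwise it runs the
    # image processor and merges the optional text encoding into its output.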
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ : Optional[int] = self.tokenizer
A_ : Optional[int] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
A_ : List[str] = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
A_ : Optional[Any] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
A_ : str = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : Any = self.tokenizer.model_input_names
A_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 718 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
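# The fixtures below expose the dummy dataset's name, its loading-script source, and
# the path of an on-disk copy of that script for tests to consume.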
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = XCLIPTextConfig()
# derive patch size from model name
A_ : Optional[Any] = model_name.find("""patch""" )
A_ : List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
A_ : str = XCLIPVisionConfig(patch_size=lowerCamelCase__ , num_frames=lowerCamelCase__ )
if "large" in model_name:
A_ : List[str] = 7_68
A_ : List[str] = 30_72
A_ : Any = 12
A_ : int = 10_24
A_ : Tuple = 40_96
A_ : Dict = 16
A_ : Union[str, Any] = 24
A_ : Dict = 7_68
A_ : Dict = 30_72
if model_name == "xclip-large-patch14-16-frames":
A_ : Union[str, Any] = 3_36
A_ : Optional[Any] = XCLIPConfig.from_text_vision_configs(lowerCamelCase__ , lowerCamelCase__ )
if "large" in model_name:
A_ : Union[str, Any] = 7_68
return config
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name == "token_embedding.weight":
A_ : Optional[Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
A_ : Optional[Any] = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
A_ : Any = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
A_ : List[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
A_ : Optional[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
A_ : List[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
A_ : Optional[int] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
A_ : Optional[int] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
A_ : Optional[int] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
A_ : List[Any] = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
A_ : Union[str, Any] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
A_ : List[Any] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
A_ : int = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
A_ : int = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
A_ : Dict = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
A_ : Union[str, Any] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
A_ : Dict = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
A_ : Dict = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
A_ : Any = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
A_ : str = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
A_ : Dict = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
A_ : str = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ : List[str] = orig_state_dict.pop(lowerCamelCase__ )
if "attn.in_proj" in key:
A_ : Any = key.split(""".""" )
if key.startswith("""visual""" ):
A_ : Dict = key_split[3]
A_ : Tuple = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
A_ : Optional[int] = val[
:dim, :
]
A_ : List[str] = val[
dim : dim * 2, :
]
A_ : Optional[int] = val[
-dim:, :
]
else:
A_ : Union[str, Any] = val[
:dim
]
A_ : List[str] = val[
dim : dim * 2
]
A_ : int = val[
-dim:
]
else:
if "weight" in key:
A_ : Union[str, Any] = val[
:dim, :
]
A_ : Optional[Any] = val[
dim : dim * 2, :
]
A_ : Union[str, Any] = val[
-dim:, :
]
else:
A_ : List[Any] = val[:dim]
A_ : Any = val[
dim : dim * 2
]
A_ : Optional[Any] = val[-dim:]
elif key.startswith("""mit""" ):
A_ : Any = key_split[2]
A_ : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
A_ : Optional[int] = val[:dim, :]
A_ : Dict = val[dim : dim * 2, :]
A_ : Tuple = val[-dim:, :]
else:
A_ : List[str] = val[:dim]
A_ : List[str] = val[dim : dim * 2]
A_ : List[Any] = val[-dim:]
else:
A_ : List[Any] = key_split[2]
A_ : str = config.text_config.hidden_size
if "weight" in key:
A_ : Optional[int] = val[:dim, :]
A_ : Optional[int] = val[
dim : dim * 2, :
]
A_ : Optional[Any] = val[-dim:, :]
else:
A_ : List[Any] = val[:dim]
A_ : Optional[int] = val[
dim : dim * 2
]
A_ : Any = val[-dim:]
else:
A_ : Dict = rename_key(lowerCamelCase__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
A_ : str = val.T
A_ : str = val
return orig_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
if num_frames == 8:
A_ : Optional[int] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
A_ : Dict = """eating_spaghetti.npy"""
elif num_frames == 32:
A_ : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
A_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowerCamelCase__ , repo_type="""dataset""" , )
A_ : Optional[int] = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
A_ : Union[str, Any] = model_to_url[model_name]
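# Infer how many frames the checkpoint samples from its name:
# 8 by default, 16 for the "16-frames" variants, 32 for the few-shot models.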
A_ : List[str] = 8
if "16-frames" in model_name:
A_ : str = 16
elif "shot" in model_name:
A_ : List[str] = 32
A_ : Tuple = get_xclip_config(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = XCLIPModel(lowerCamelCase__ )
model.eval()
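# Google Drive checkpoints have to be fetched with gdown; the GitHub release files
# can be streamed directly through torch.hub.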
if "drive" in checkpoint_url:
A_ : Union[str, Any] = """pytorch_model.bin"""
gdown.cached_download(lowerCamelCase__ , lowerCamelCase__ , quiet=lowerCamelCase__ )
A_ : List[Any] = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
else:
A_ : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase__ )["""model"""]
A_ : Optional[Any] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[int] = XCLIPModel(lowerCamelCase__ )
A_, A_ : Union[str, Any] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
A_ : Any = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
A_ : Optional[int] = VideoMAEImageProcessor(size=lowerCamelCase__ )
A_ : Tuple = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
A_ : Dict = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
A_ : int = XCLIPProcessor(image_processor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
A_ : int = prepare_video(lowerCamelCase__ )
A_ : List[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowerCamelCase__ , return_tensors="""pt""" , padding=lowerCamelCase__ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase__ )
# Verify outputs
A_ : str = outputs.logits_per_video
A_ : str = logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowerCamelCase__ )
# kinetics-400
if model_name == "xclip-base-patch32":
A_ : int = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
A_ : Dict = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
A_ : Union[str, Any] = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
A_ : int = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
A_ : int = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
A_ : Dict = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
A_ : Tuple = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
A_ : Any = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
A_ : str = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
A_ : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
A_ : Union[str, Any] = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
A_ : int = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
A_ : str = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
A_ : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
A_ : Dict = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
A_ : int = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
A_ : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
A_ : Dict = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(f'Model name {model_name} not supported' )
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
processor.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
lowerCamelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :Any = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 719 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
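# e.g. euclidean_distance([0, 0], [3, 4]) == 5.0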
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
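# A quick sanity check on a held-out point (hypothetical usage of the split above):
# classifier(X_train, y_train, classes, X_test[0]) should usually match classes[y_test[0]].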
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__(self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A_ : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
A_ : Union[str, Any] = parent
A_ : Optional[int] = batch_size
A_ : Optional[int] = num_channels
A_ : int = min_resolution
A_ : Optional[Any] = max_resolution
A_ : List[Any] = do_resize
A_ : List[Any] = size
A_ : Any = do_normalize
A_ : Union[str, Any] = image_mean
A_ : Union[str, Any] = image_std
A_ : Dict = do_rescale
A_ : List[str] = rescale_factor
A_ : Any = do_pad
def _a (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a (self , lowercase , lowercase=False ):
if not batched:
A_ : int = image_inputs[0]
if isinstance(lowercase , Image.Image ):
A_, A_ : int = image.size
else:
A_, A_ : List[Any] = image.shape[1], image.shape[2]
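# Mirror the processor's resize rule: scale the image so its shortest edge equals
# size["shortest_edge"] while preserving the aspect ratio.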
if w < h:
A_ : Any = int(self.size["""shortest_edge"""] * h / w )
A_ : int = self.size["""shortest_edge"""]
elif w > h:
A_ : List[Any] = self.size["""shortest_edge"""]
A_ : Optional[int] = int(self.size["""shortest_edge"""] * w / h )
else:
A_ : List[str] = self.size["""shortest_edge"""]
A_ : Union[str, Any] = self.size["""shortest_edge"""]
else:
A_ : Optional[int] = []
for image in image_inputs:
A_, A_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : int = max(lowercase , key=lambda lowercase : item[0] )[0]
A_ : int = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def _a (self ):
A_ : str = ConditionalDetrImageProcessingTester(self )
@property
def _a (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a (self ):
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """image_mean""" ) )
self.assertTrue(hasattr(lowercase , """image_std""" ) )
self.assertTrue(hasattr(lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
def _a (self ):
A_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase )
A_ : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase )
def _a (self ):
pass
def _a (self ):
# Initialize image_processing
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_, A_ : str = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_, A_ : Tuple = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
A_ : Optional[int] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a (self ):
# Initialize image_processing
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_, A_ : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Union[str, Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
A_, A_ : int = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a (self ):
# Initialize image_processing
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_, A_ : str = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
A_, A_ : Any = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a (self ):
# prepare image and target
A_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A_ : Dict = json.loads(f.read() )
A_ : Any = {"""image_id""": 39769, """annotations""": target}
# encode them
A_ : str = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
A_ : Any = image_processing(images=lowercase , annotations=lowercase , return_tensors="""pt""" )
# verify pixel values
A_ : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase )
A_ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
A_ : Optional[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase ) )
# verify boxes
A_ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase )
A_ : Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase , atol=1E-3 ) )
# verify image_id
A_ : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase ) )
# verify is_crowd
A_ : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase ) )
# verify class_labels
A_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase ) )
# verify orig_size
A_ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase ) )
# verify size
A_ : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase ) )
@slow
def _a (self ):
# prepare image, target and masks_path
A_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A_ : List[Any] = json.loads(f.read() )
A_ : int = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
A_ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A_ : Dict = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
A_ : Dict = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="""pt""" )
# verify pixel values
A_ : str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase )
A_ : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
A_ : Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase )
A_ : str = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase , atol=1E-3 ) )
# verify image_id
A_ : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase ) )
# verify is_crowd
A_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase ) )
# verify class_labels
A_ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase ) )
# verify masks
A_ : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase )
# verify orig_size
A_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase ) )
# verify size
A_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase ) ) | 720 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
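# e.g. labels ["politics", "sports"] with the default template turn each sequence into
# the premise/hypothesis pairs [sequence, "This example is politics."] and
# [sequence, "This example is sports."] for the NLI model to score.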
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
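# Scan the model's label-to-id mapping for a label starting with "entail";
# fall back to -1 (a warning is emitted at init) when none exists.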
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
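# The flat batch holds num_sequences * num_labels NLI outputs; after the reshape,
# axis 1 indexes the candidate labels for each input sequence.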
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
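# When "entailment" is the first logit the last one is treated as "contradiction"
# (and vice versa); a softmax over just these two yields an independent score per label.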
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 0 |