import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem containing a single file."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        The compressed file system can be instantiated from any compressed file. It exposes the archive as a
        filesystem with one file inside, named after the archive with the compression extension stripped.

        Args:
            fo (`str`): Path to the compressed file.
            target_protocol (`str`, optional): Extra protocol for the underlying filesystem, e.g. "http".
            target_options (`dict`, optional): Kwargs passed when instantiating the target filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path: str) -> str:
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
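
A minimal usage sketch, not part of the original module: each filesystem above exposes exactly one file, named after the archive with the compression extension stripped. The local path "data.txt.gz" is a hypothetical example.

if __name__ == "__main__":
    fs = GzipFileSystem(fo="data.txt.gz")  # assumes this gzip archive exists locally
    with fs.open("data.txt", mode="rb") as f:  # inner file name = archive name minus ".gz"
        print(f.read()[:80])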
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance-preserving stochastic differential equation (SDE) scheduler.

    For more information, see the original paper: https://arxiv.org/abs/2011.13456
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Set the continuous timesteps used for the diffusion chain, linearly spaced from 1 to sampling_eps."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Predict the sample at the previous timestep by reversing the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute the reverse-SDE update
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
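
A minimal sampling-loop sketch, for illustration only: a zero tensor stands in for a real score model so the loop is self-contained (running it still requires the diffusers package context for the relative imports above).

if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(2, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(sample)  # stand-in for a real score model(sample, t)
        sample, sample_mean = scheduler.step_pred(score, sample, t)
    print(sample_mean.shape)  # torch.Size([2, 3, 8, 8])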
"""Helper functions for training models with pytorch-quantization."""
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V share a single GEMM.

    Force the (Q, K, V) scale factors to match by taking their max.
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval, by adjusting the amax of the following input quantizer."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning every channel the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every weighted layer in the model."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
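
A wiring sketch for the helpers above, illustration only: the default quantizer descriptors must be set before any QuantLinear is created, and calibration brackets a few forward passes. The single-layer model is a hypothetical stand-in for a real network.

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args(["--aprec", "8", "--wprec", "8", "--calibrator", "max"])

    set_default_quantizers(args)  # must run before the model is created
    model = torch.nn.Sequential(quant_nn.QuantLinear(4, 4))

    configure_model(model, args, calib=True)
    enable_calibration(model)
    model(torch.randn(8, 4))  # a few forward passes on calibration data
    # finish_calibration(model, args)  # note: moves the model to CUDA, so it needs a GPU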
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    This is a general feature extraction class for speech recognition.

    Args:
        feature_size (`int`): The feature dimension of the extracted features.
        sampling_rate (`int`): The sampling rate at which the audio files should be digitalized, in hertz (Hz).
        padding_value (`float`): The value used to fill padding positions.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists
        # (this allows using this method as a collate_fn in a PyTorch DataLoader)
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
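
A short usage sketch, not from the original file: concrete extractors such as Wav2Vec2FeatureExtractor inherit pad() from this class, so the padding logic can be exercised directly on a dict of variable-length sequences (assumes transformers is installed).

if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = extractor.pad(
        {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]},
        padding="longest",
        return_attention_mask=True,
        return_tensors="np",
    )
    print(batch["input_values"].shape)  # (2, 3): the second sequence is padded with 0.0
    print(batch["attention_mask"])  # [[1 1 1] [1 1 0]]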
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the 4th channel is dropped by the RGB conversion)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
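
The expected_hidden_dim used throughout these tests follows Pix2Struct's flattened-patch layout: each patch is unrolled to patch_height * patch_width * num_channels values, plus two slots holding the patch's row and column index. A quick check with the defaults above:

if __name__ == "__main__":
    patch_size = {"height": 16, "width": 16}
    num_channels = 3
    expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
    print(expected_hidden_dim)  # 770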
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
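
A minimal CPU-offload sketch of the API exercised above, illustration only: attach_align_device_hook moves parameters to the meta device and streams them back per submodule during the forward pass.

if __name__ == "__main__":
    offload_model = ModelForTest()
    attach_align_device_hook(offload_model, execution_device="cpu", offload=True)
    print(offload_model.linear1.weight.device)  # meta
    output = offload_model(torch.randn(2, 3))  # weights are restored on the fly
    remove_hook_from_submodules(offload_model)  # loads the weights back permanently
    print(offload_model.linear1.weight.device)  # cpu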
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """
    Configuration class for the MVP model. Instantiating a configuration with the defaults yields a configuration
    similar to that of the RUCAIBox/mvp architecture.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
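A short instantiation sketch, illustration only (assumes a transformers installation that exports MvpConfig): the attribute_map above aliases the generic attribute names to MVP-specific ones.

if __name__ == "__main__":
    from transformers import MvpConfig

    config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
    print(config.hidden_size)  # 512, routed to d_model via attribute_map
    print(config.num_attention_heads)  # 16, routed to encoder_attention_heads
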
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
            # out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        __magic_name__ : Union[str, Any] =UNet2DConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __magic_name__ : str =Image.fromarray(np.uint8(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
        __magic_name__ : Dict =np.ones((64, 64) , dtype=np.float32 )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        __magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.float32 )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
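# Illustrative sketch (added): the device-aware seeding pattern used in
# get_dummy_inputs above. Older PyTorch builds did not support seeding a
# torch.Generator on "mps", so tests fall back to the global manual_seed there;
# treat the MPS detail as an assumption about the targeted PyTorch version.
import torch


def make_generator(device, seed: int) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (seeded) default generator
    return torch.Generator(device=device).manual_seed(seed)


_gen = make_generator("cpu", 0)
_noise = torch.randn((1, 4, 8, 8), generator=_gen)  # reproducible across runs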
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
def UpperCamelCase ( self , lowercase=0 , **lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , __snake_case )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**__snake_case )
A__ = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
if time_step is None:
A__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
A__ = scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
A__ = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
A__ = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self , lowercase=0 , **lowercase ) -> Dict:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , __snake_case )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[:]
if time_step is None:
A__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
A__ = scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[:]
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
A__ = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
A__ = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self , **lowercase ) -> Optional[int]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__snake_case )
A__ = scheduler_class(**__snake_case )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__snake_case , __snake_case )
A__ = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__snake_case , __snake_case )
A__ = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , __snake_case )
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__snake_case )
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , "set_timesteps" ):
scheduler.set_timesteps(__snake_case )
elif num_inference_steps is not None and not hasattr(__snake_case , "set_timesteps" ):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A__ = dummy_past_residuals[:]
A__ = scheduler.timesteps[5]
A__ = scheduler.timesteps[6]
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
A__ = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case , time_step=__snake_case )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__snake_case , time_step=__snake_case )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.full_loop()
A__ = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 2540529 ) < 10
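# Illustrative sketch (added): the bare sampling loop that full_loop() above
# exercises, with a constant-residual stand-in instead of a real denoising
# network. Uses only public IPNDMScheduler API.
import torch
from diffusers import IPNDMScheduler

_scheduler = IPNDMScheduler(num_train_timesteps=1000)
_scheduler.set_timesteps(10)

_sample = torch.randn(1, 3, 8, 8)
for _t in _scheduler.timesteps:
    _residual = 0.1 * _sample  # stand-in for model(sample, t)
    _sample = _scheduler.step(_residual, _t, _sample).prev_sample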
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
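# Illustrative sketch (added): how the two logit tensors asserted on above are
# typically combined into a semantic segmentation map. This mirrors the logic of
# Mask2Former semantic post-processing; shapes here are made up for the demo.
import torch

_b, _q, _labels, _h, _w = 1, 10, 5, 96, 96
_class_queries_logits = torch.randn(_b, _q, _labels + 1)  # (B, Q, L + 1)
_masks_queries_logits = torch.randn(_b, _q, _h, _w)       # (B, Q, H, W)

_masks_classes = _class_queries_logits.softmax(dim=-1)[..., :-1]  # drop "no object"
_masks_probs = _masks_queries_logits.sigmoid()
_segmentation = torch.einsum("bqc,bqhw->bchw", _masks_classes, _masks_probs)
_semantic_map = _segmentation.argmax(dim=1)               # (B, H, W)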
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
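# Illustrative sketch (added): how the default per-stage strides translate into
# feature-map sizes. Each encoder block downsamples by its stride, so a 512x512
# input shrinks by 4 * 2 * 2 * 2 = 32x overall with the defaults above.
_size = 512
for _stage, _stride in enumerate([4, 2, 2, 2]):
    _size //= _stride
    print(f"stage {_stage}: {_size}x{_size}")  # 128, 64, 32, 16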
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
    to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed
    precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
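# Hedged usage sketch (added): normally this wrapper is created for you by
# accelerator.prepare(), and the optimizers it receives expose a
# `step_was_skipped` flag. The stand-in attribute below fakes that contract so
# the behaviour can be seen in a single process; split_batches=True keeps the
# sketch on the simple one-step-per-step path.
import torch

_model = torch.nn.Linear(4, 2)
_opt = torch.optim.SGD(_model.parameters(), lr=0.1)
_opt.step_was_skipped = False  # normally provided by AcceleratedOptimizer

_sched = torch.optim.lr_scheduler.StepLR(_opt, step_size=1, gamma=0.5)
_wrapped = AcceleratedScheduler(_sched, _opt, split_batches=True)

_opt.step()
_wrapped.step()                # forwards to StepLR.step() since gradients are in sync
print(_wrapped.get_last_lr())  # [0.05]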
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX-Algorithm for minimum vertex cover
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so we push -1 * len(v) to build a max priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
"""simple docstring"""
def lowercase ( lowerCAmelCase__ = 1_000 ):
lowerCamelCase_ = 2**power
lowerCamelCase_ = 0
while n:
lowerCamelCase_ = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
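# Quick checks (added) against small powers that are easy to verify by hand:
# 2**10 = 1024 -> 1 + 0 + 2 + 4 = 7, and 2**4 = 16 -> 1 + 6 = 7.
assert solution(10) == 7
assert solution(4) == 7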
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working, simple example of using Accelerate
# with LocalSGD, a method that synchronizes model
# parameters every K batches. It is different from, but
# complementary to, gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 32
def UpperCAmelCase ( a__ , a__ = 16 ):
'''simple docstring'''
lowerCAmelCase :List[str] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase :str = load_dataset('glue' , 'mrpc' )
def tokenize_function(a__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase :List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a__ , max_length=a__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase :int = datasets.map(
a__ , batched=a__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase :Any = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase :List[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase :List[str] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase :Tuple = 8
else:
lowerCAmelCase :Dict = None
return tokenizer.pad(
a__ , padding='longest' , max_length=a__ , pad_to_multiple_of=a__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase :Tuple = DataLoader(
tokenized_datasets['train'] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
lowerCAmelCase :Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__SCREAMING_SNAKE_CASE = mocked_dataloaders # noqa: F811
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a__ ) == "1":
lowerCAmelCase :Tuple = 2
# New Code #
lowerCAmelCase :List[Any] = int(args.gradient_accumulation_steps )
lowerCAmelCase :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase :List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=a__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase :List[str] = config["""lr"""]
lowerCAmelCase :Dict = int(config['num_epochs'] )
lowerCAmelCase :Optional[int] = int(config['seed'] )
lowerCAmelCase :Tuple = int(config['batch_size'] )
lowerCAmelCase :Dict = evaluate.load('glue' , 'mrpc' )
set_seed(a__ )
lowerCAmelCase :Union[str, Any] = get_dataloaders(a__ , a__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase :str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase :List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase :Tuple = AdamW(params=model.parameters() , lr=a__ )
# Instantiate scheduler
lowerCAmelCase :Dict = get_linear_schedule_with_warmup(
optimizer=a__ , num_warmup_steps=1_00 , num_training_steps=(len(a__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase :List[Any] = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# Now we train the model
for epoch in range(a__ ):
model.train()
with LocalSGD(
accelerator=a__ , model=a__ , local_sgd_steps=a__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs, nor do we advise using this on them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(a__ ):
lowerCAmelCase :Tuple = model(**a__ )
lowerCAmelCase :Optional[int] = output.loss
accelerator.backward(a__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase :int = model(**a__ )
lowerCAmelCase :int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase :Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a__ , references=a__ , )
lowerCAmelCase :Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , a__ )
def main():
'''simple docstring'''
lowerCAmelCase :List[str] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a__ , default=a__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=a__ , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=a__ , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase :Tuple = parser.parse_args()
lowerCAmelCase :Any = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(a__ , a__ )
if __name__ == "__main__":
    main()
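# Conceptual sketch (added): what LocalSGD layers on top of gradient
# accumulation is periodic parameter averaging across workers. A hypothetical
# single-process illustration of that averaging step:
import torch


def average_parameters(models):
    with torch.no_grad():
        for params in zip(*(m.parameters() for m in models)):
            mean = torch.stack([p.data for p in params]).mean(dim=0)
            for p in params:
                p.data.copy_(mean)


_workers = [torch.nn.Linear(2, 2) for _ in range(4)]
average_parameters(_workers)  # all four models now share identical weights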
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to the key handler"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
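# Hedged usage sketch (added): a menu class opting into key handling through
# the helpers above, mirroring how accelerate's menu classes use them. The
# ord("j")/ord("k") codes are illustrative stand-ins, not accelerate's KEYMAP.
@register
class _DemoMenu:
    @mark(ord("j"))
    def move_down(cls):
        print("down")

    @mark_multiple(ord("k"), ord("K"))
    def move_up(cls):
        print("up")


# _DemoMenu.handle_input(_DemoMenu) would read one character from the terminal
# and dispatch to the matching handler (not called here since it blocks).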
"""simple docstring"""
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__ ):
snake_case_ : int = name
snake_case_ : Optional[int] = val
def __str__(self ):
return f'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__(self , lowercase__ ):
return self.val < other.val
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Optional[Any] = {}
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = self.build_heap(__snake_case )
def __getitem__(self , lowercase__ ):
return self.get_value(__snake_case )
def __UpperCamelCase (self , lowercase__ ):
return (idx - 1) // 2
def __UpperCamelCase (self , lowercase__ ):
return idx * 2 + 1
def __UpperCamelCase (self , lowercase__ ):
return idx * 2 + 2
def __UpperCamelCase (self , lowercase__ ):
return self.heap_dict[key]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[int] = len(__snake_case ) - 1
snake_case_ : List[Any] = self.get_parent_idx(__snake_case )
for idx, i in enumerate(__snake_case ):
snake_case_ : Dict = idx
snake_case_ : str = i.val
for i in range(__snake_case , -1 , -1 ):
self.sift_down(__snake_case , __snake_case )
return array
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
while True:
snake_case_ : int = self.get_left_child_idx(__snake_case ) # noqa: E741
snake_case_ : List[str] = self.get_right_child_idx(__snake_case )
snake_case_ : Tuple = idx
if l < len(__snake_case ) and array[l] < array[idx]:
snake_case_ : Dict = l
if r < len(__snake_case ) and array[r] < array[smallest]:
snake_case_ : List[str] = r
if smallest != idx:
snake_case_ : int = array[smallest], array[idx]
(
snake_case_
) : int = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
snake_case_ : Any = smallest
else:
break
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[int] = self.get_parent_idx(__snake_case )
while p >= 0 and self.heap[p] > self.heap[idx]:
snake_case_ : str = self.heap[idx], self.heap[p]
snake_case_ : Dict = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
snake_case_ : Union[str, Any] = p
snake_case_ : Tuple = self.get_parent_idx(__snake_case )
def __UpperCamelCase (self ):
return self.heap[0]
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.heap[-1], self.heap[0]
snake_case_ : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
snake_case_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def __UpperCamelCase (self , lowercase__ ):
self.heap.append(__snake_case )
snake_case_ : Dict = len(self.heap ) - 1
snake_case_ : List[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def __UpperCamelCase (self ):
return len(self.heap ) == 0
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
snake_case_ : Dict = new_value
snake_case_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
a_ = Node('''R''', -1)
a_ = Node('''B''', 6)
a_ = Node('''A''', 3)
a_ = Node('''X''', 1)
a_ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
a_ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
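# Illustrative sketch (added): decrease_key is exactly what Dijkstra-style
# relaxation needs, updating a node's priority in place instead of pushing
# duplicates. The graph and weights below are invented for the demo.
nodes = {name: Node(name, float("inf")) for name in "ABC"}
nodes["A"].val = 0
heap = MinHeap(list(nodes.values()))

u = heap.remove()  # pops A (distance 0)
for neighbour, weight in [("B", 2), ("C", 5)]:
    if u.val + weight < nodes[neighbour].val:
        heap.decrease_key(nodes[neighbour], u.val + weight)

print(heap.remove())  # Node(B, 2) comes out next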
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : int = 4096
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Optional[int] = os.environ.pop("PROCESS_TRAIN", "false")
UpperCAmelCase_ : str = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
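# Shape note (illustrative): for a yes/no sample the returned dict looks like
# {"id": ..., "category": ["yes"], "start_token": [], "end_token": [],
#  "start_byte": [], "end_byte": [], "text": ["<cls>"], "remove_it": False}.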
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing HTML tokens & the answer span shifted to match it"""
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # every HTML token dropped before the answer shifts the span one step left
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
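# Worked example (illustrative): with doc["token"] = ["<p>", "hi", "there"],
# is_html = [True, False, False] and an answer covering tokens 1..3 (exclusive end),
# the single HTML token before the answer shifts the span to 0..2 in the clean context.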
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap of consecutive chunks will be doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            doc_slice = input_ids[i:end_index]
            inputs.append(q_indices + doc_slice)
            category.append(answer["category"][0])
            if doc_slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token: the last answer word may split into several sub-tokens
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        # this won't always match exactly because of extra whitespace
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        doc_slice = input_ids[i:end_index]
        inputs.append(q_indices + doc_slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if doc_slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
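# Striding arithmetic (illustrative): with max_length=4096 and doc_stride=2048,
# consecutive document chunks start max_length - doc_stride = 2048 tokens apart;
# each chunk keeps the full question prefix, so neighbouring chunks overlap by
# roughly doc_stride - q_len tokens.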
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the null-category samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
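    # Example invocation (the script filename below is an assumption, not from the source):
    #   PROCESS_TRAIN=true python prepare_nq.py   # writes nq-training.jsonl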
| 21 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # rebuild the pre-tokenizer if the requested add_prefix_space differs from the saved one
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 108 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
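# Usage sketch: the defaults above mirror facebook/xlm-roberta-xl, e.g.
# XLMRobertaXLConfig().hidden_size == 2560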
| 21 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name
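# Example (illustrative): rename_key("transformer.resblocks.0.ln_1.weight")
# -> "text_model.encoder.layers.0.layer_norm1.weight"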
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            # the checkpoint stores q/k/v as one fused in_proj tensor; split it into
            # separate q_proj/k_proj/v_proj entries of the converted state dict
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                attn_name = "message_attn" if "message_attn" in key else "self_attn"
                prefix = f"vision_model.encoder.layers.{layer_num}.{attn_name}"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"

            suffix = "weight" if "weight" in key else "bias"
            # slicing the first axis works for both 2-D weights and 1-D biases
            chunks = (val[:dim], val[dim : dim * 2], val[-dim:])
            for proj, chunk in zip(("q_proj", "k_proj", "v_proj"), chunks):
                orig_state_dict[f"{prefix}.{proj}.{suffix}"] = chunk
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
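    # Example invocation (illustrative, mirroring the usual transformers conversion-script CLI):
    #   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path ./xclip-base-patch32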
| 651 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
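    # Example (fire exposes the keyword arguments as CLI flags; the filename is an assumption):
    #   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16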
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 650 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
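    # The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and
    # 49/98; their product is 1/100, so this prints 100.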
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math module's value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
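# Why this works (sketch): if U ~ Uniform(a, b), then E[f(U)] * (b - a) equals the
# integral of f over [a, b]; the sample mean above estimates E[f(U)].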
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # the quarter circle of radius 2 has area pi, so integrating this over [0, 2] gives pi
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 673 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # color each connected component, starting every unvisited vertex with color 0
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge connects two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
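# The sample graph is an even cycle 0-1-2-3 plus an isolated vertex 4,
# so it is bipartite and the script prints True.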
| 514 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    # exact GELU: x times the standard normal CDF, evaluated with erf
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    # tanh-based GELU approximation (as used by GPT-2)
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # GELU clipped to the range [-10, 10]
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split the input into halves a, b and return a * sigmoid(b)
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 21 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 666 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences of `nums`."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # keep the best of: the previous answer, extending it with num, or starting fresh
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
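    # Example: max_subsequence_sum([1, 2, 3, -2, 5]) returns 11, the sum of every
    # positive element (a subsequence may skip elements, unlike a contiguous subarray).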
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 403 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
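# Usage sketch (hedged):
# feature = TranslationVariableLanguages(languages=["en", "fr"])
# feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
# -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "le chat", "la chatte")}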
| 21 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class __lowerCamelCase ( UpperCamelCase__ ):
a__: Optional[Any] = VOCAB_FILES_NAMES
a__: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a__: Any = PRETRAINED_INIT_CONFIGURATION
a__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__: Any = HerbertTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="</s>" , **UpperCAmelCase , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sep_token=__snake_case , **__snake_case , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
lowerCamelCase_ = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
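# Hedged usage sketch (illustrative, not part of the original file): the special-token
# layout produced by the methods above for a sentence pair is <s> A </s> B </s>.
# The token ids below are placeholders.
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
# mask = tokenizer.get_special_tokens_mask([10, 11], [20, 21])  # -> [1, 0, 0, 1, 0, 0, 1]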
| 29 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin, as in the original benchmark
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        # Each timing is stored under a descriptive key; the obfuscation dropped
        # the keys, so the names below are reconstructed from the upstream benchmark.
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 553 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    # strict=False tolerates checkpoint keys (e.g. loss weights) that are absent
    # from the bare VQModel; the original flag value is not recoverable here.
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
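# Hedged usage sketch (illustrative paths and tensors, not part of the original file):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
#                    ckpt_path="./model_checkpoints/vqgan_only.pt")
# x_rec = reconstruct_with_vqgan(images.to(device), vqgan)  # `images` is a batch tensor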
| 21 | 0 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[:{data_args.validation_split_percentage}%]',
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[{data_args.validation_split_percentage}%:]',
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.config_overrides is not None:
        logger.info(f'Overriding config: {model_args.config_overrides}')
        config.update_from_string(model_args.config_overrides)
        logger.info(f'New config: {config}')
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'{key} = {value}\n')
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'{key} = {value}\n')
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
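# Hedged usage sketch (paths and model name are placeholders, not from the original file):
# python run_mlm_wwm.py \
#   --model_name_or_path bert-base-chinese \
#   --train_file path/to/train.txt \
#   --train_ref_file path/to/train_ref.json \
#   --do_train --do_eval \
#   --output_dir ./mlm-wwm-out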
| 480 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
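# Hedged note (not from the original file): `debug_launcher` runs the given function in
# CPU subprocesses for debugging distributed code; the process count can be tuned, e.g.
# debug_launcher(test_script.main, num_processes=2)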
| 21 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution()) | 108 |
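# Hedged check (values verified by hand for Project Euler 33): the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product in
# lowest terms is 1/100, so:
# fraction_list(2)  # -> ['16/64', '19/95', '26/65', '49/98']
# solution()        # -> 100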
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
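# Hedged example (illustrative input): a single pass partitions all three colors in
# O(n) time and O(1) extra space, e.g.
# dutch_national_flag_sort([2, 1, 0, 0, 1, 2])  # -> [0, 0, 1, 1, 2, 2]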
| 21 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 651 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) SDE scheduler for score-based generative models."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
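# Hedged usage sketch (not part of the original file): a bare predictor-only sampling
# loop. `score_fn` is a placeholder for a trained score model.
# scheduler = ScoreSdeVpScheduler()
# scheduler.set_timesteps(1000)
# x = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     score = score_fn(x, t)
#     x, x_mean = scheduler.step_pred(score, x, t)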
| 21 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 650 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Generic feature extractor for sequence inputs, providing `pad` for input values."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it to a dict of lists
        # so this method can be used as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
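# Hedged usage sketch (toy subclass, not part of the original file):
# class ToyFeatureExtractor(SequenceFeatureExtractor):
#     model_input_names = ["input_values"]
#
# fe = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
# batch = fe.pad({"input_values": [[0.1, 0.2], [0.3]]}, padding="longest", return_tensors="np")
# batch["input_values"].shape  # -> (2, 2); the second row is right-padded with 0.0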
| 21 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : int ="\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
__lowerCAmelCase : Any ="\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
__lowerCAmelCase : Tuple ="\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :str ) -> List[str]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __magic_name__( self :int ) -> Optional[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
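

# Illustrative sketch (added; not part of the original test file): how the hook
# classes above compose around one forward pass. Names here are local to this demo.
def _demo_hooks():
    model = ModelForTest()
    # pre_forward shifts the input by +1, post_forward shifts the output by +1
    add_hook_to_module(model, SequentialHook(PreForwardHook(), PostForwardHook()))
    return model(torch.randn(2, 3))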
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hook(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
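

# Illustrative sketch beyond the tests above (added; not from the original file):
# the same AlignDevicesHook offload flow on a throwaway model, runnable on CPU only.
if __name__ == "__main__":
    demo_model = ModelForTest()
    attach_align_device_hook(demo_model, execution_device="cpu", offload=True)
    print(demo_model.linear1.weight.device)  # meta: weights now live in the offload map
    print(demo_model(torch.randn(2, 3)).shape)  # forward still works: torch.Size([2, 5])
    remove_hook_from_submodules(demo_model)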
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [0] * len(lowerCAmelCase )
UpperCAmelCase = []
UpperCAmelCase = [1] * len(lowerCAmelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(lowerCAmelCase )
while queue:
UpperCAmelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCAmelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowerCAmelCase )
print(max(lowerCAmelCase ) )
# Adjacency list of Graph
lowerCAmelCase_ : List[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
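

# Optional variant (added for illustration): the same algorithm with an O(1)
# dequeue using collections.deque, since list.pop(0) above costs O(V) per pop.
def longest_distance_deque(graph):
    from collections import deque

    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    queue = deque(i for i in range(len(graph)) if indegree[i] == 0)
    while queue:
        vertex = queue.popleft()
        for x in graph[vertex]:
            indegree[x] -= 1
            long_dist[x] = max(long_dist[x], long_dist[vertex] + 1)
            if indegree[x] == 0:
                queue.append(x)
    return max(long_dist)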
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
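

# Condensed end-to-end sketch (added for illustration; mirrors the slow test above
# and assumes a CUDA device plus network access to the public checkpoints):
#
#   prior = KandinskyPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   inpaint = KandinskyInpaintPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
#   ).to("cuda")
#   emb, neg_emb = prior("a hat", num_inference_steps=5, negative_prompt="").to_tuple()
#   result = inpaint("a hat", image=init_image, mask_image=mask, image_embeds=emb,
#                    negative_image_embeds=neg_emb, output_type="pil").images[0]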
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a__ ( UpperCamelCase__ ):
"""simple docstring"""
__lowerCamelCase = 'philschmid/bart-large-cnn-samsum'
__lowerCamelCase = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
__lowerCamelCase = 'summarizer'
__lowerCamelCase = AutoTokenizer
__lowerCamelCase = AutoModelForSeqaSeqLM
__lowerCamelCase = ['text']
__lowerCamelCase = ['text']
def UpperCamelCase ( self , lowercase ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor(__snake_case , return_tensors="pt" , truncation=__snake_case )
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
return self.model.generate(**__snake_case )[0]
def UpperCamelCase ( self , lowercase ) -> str:
'''simple docstring'''
return self.pre_processor.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
        __magic_name__ : Dict =Mask2FormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
            __magic_name__ : List[str] =Mask2FormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
        __magic_name__ : str =Mask2FormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            outputs = model(**inputs)
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            outputs = model(**inputs)
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        self.assertTrue(outputs.loss is not None)
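

# Illustrative end-to-end sketch (added; not part of the test suite). It reuses the
# public checkpoint referenced above and the processor's instance-segmentation
# post-processing; the (480, 640) target size matches the COCO cats fixture.
if __name__ == "__main__":
    model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    image_processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    inputs = image_processor(prepare_img(), return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    result = image_processor.post_process_instance_segmentation(outputs, target_sizes=[(480, 640)])[0]
    print(result["segmentation"].shape)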
def solution(pence: int = 200) -> int:
    """Count the ways British coins can sum to `pence` (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
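    # Worked example (added for illustration): with coins {1, 2, 5} there are
    # exactly four ways to make 5p: 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5.
    assert solution(5) == 4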
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
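

# Hedged usage sketch (added; not part of this module): instantiating the config
# and a randomly-initialised model from it, using the classes transformers exports.
if __name__ == "__main__":
    from transformers import SegformerForSemanticSegmentation

    config = SegformerConfig(num_labels=19)  # e.g. a Cityscapes-style label count
    model = SegformerForSemanticSegmentation(config)
    print(model.config.decoder_hidden_size)  # 256 by default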
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
import json
import tempfile
from pathlib import Path
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_lowerCamelCase : Union[str, Any] = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
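
# Round-trip check (added for illustration): the tiny artifacts saved above can be
# reloaded from disk exactly like a hub checkpoint.
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print("reload ok:", reloaded_model.num_parameters() == tiny_model.num_parameters())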
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation for minimum vertex cover: repeatedly pick the highest-degree vertex."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    #   (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
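
# Illustrative note (added): with the lazy module installed above, a downstream
# import stays cheap until an attribute is actually touched, e.g.
#
#   from transformers.models.bloom import BloomConfig
#   config = BloomConfig()  # the real submodule import happens here, on first access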
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b - digitsum(b), with the current term written as b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds `addend` into the digit array in place, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped

        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True, metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv", metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv", metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries"
            " to benchmark Transformer models.",
            FutureWarning,
        )
    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models
    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 553 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    # mark the function with the key code so the metaclass can register it
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    # same as mark(), but registers several key codes at once
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    # metaclass that collects marked methods into a key -> handler table
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        # finds and returns the matching handler for the character read, if any
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    # adds the KeyHandler metaclass to an existing class
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
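# Usage sketch (an illustrative addition, not from the original module): methods
# marked with a key code are collected by the KeyHandler metaclass. The exact
# KEYMAP entry used here ("up") is an assumption about the keymap module.
class ExampleMenu(metaclass=KeyHandler):
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "up"
# ExampleMenu.handle_input() blocks on get_character() and dispatches to
# move_up() when the corresponding key code is read.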
| 21 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
_A : List[str] = DDIMPipeline
_A : str = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A : int = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
_A : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_A : Union[str, Any] = False
def __UpperCamelCase (self ):
torch.manual_seed(0 )
snake_case_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
snake_case_ : str = DDIMScheduler()
snake_case_ : str = {"""unet""": unet, """scheduler""": scheduler}
return components
def __UpperCamelCase (self , lowercase__ , lowercase__=0 ):
if str(__snake_case ).startswith("""mps""" ):
snake_case_ : List[str] = torch.manual_seed(__snake_case )
else:
snake_case_ : List[str] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case_ : str = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase (self ):
snake_case_ : Any = """cpu"""
snake_case_ : Tuple = self.get_dummy_components()
snake_case_ : Union[str, Any] = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case_ : str = self.get_dummy_inputs(__snake_case )
snake_case_ : int = pipe(**__snake_case ).images
snake_case_ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
snake_case_ : List[str] = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
snake_case_ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
def __UpperCamelCase (self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __UpperCamelCase (self ):
super().test_save_load_local(expected_max_difference=3e-3 )
def __UpperCamelCase (self ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __UpperCamelCase (self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = """google/ddpm-cifar10-32"""
snake_case_ : Dict = UNetaDModel.from_pretrained(__snake_case )
snake_case_ : Dict = DDIMScheduler()
snake_case_ : Optional[int] = DDIMPipeline(unet=__snake_case , scheduler=__snake_case )
ddim.to(__snake_case )
ddim.set_progress_bar_config(disable=__snake_case )
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : int = ddim(generator=__snake_case , eta=0.0 , output_type="""numpy""" ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : List[Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = """google/ddpm-ema-bedroom-256"""
snake_case_ : str = UNetaDModel.from_pretrained(__snake_case )
snake_case_ : Any = DDIMScheduler.from_pretrained(__snake_case )
snake_case_ : Any = DDIMPipeline(unet=__snake_case , scheduler=__snake_case )
ddpm.to(__snake_case )
ddpm.set_progress_bar_config(disable=__snake_case )
snake_case_ : Union[str, Any] = torch.manual_seed(0 )
snake_case_ : List[str] = ddpm(generator=__snake_case , output_type="""numpy""" ).images
snake_case_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case_ : Union[str, Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
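# Hedged inference sketch mirroring the slow test above. Note the scrambled
# import name UNetaDModel corresponds to diffusers' UNet2DModel; the checkpoint
# is the one the test itself loads. Kept as comments so the test module does
# not run a pipeline at import time:
# import torch
# from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
# image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]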
| 480 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a
    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # used later to drop samples that carry no answer
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
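# Companion sketch (an addition, not in the original script): reading the
# cached jsonl back; each record carries exactly the fields written by
# save_to_disk above.
def load_cached_examples(path="nq-validation.jsonl"):
    examples = []
    with jsonlines.open(path) as reader:
        for record in reader:  # keys: input_ids, start_token, end_token, category
            examples.append(record)
    return examples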
| 21 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 108 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
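# Hedged usage sketch (an addition): defaults mirror facebook/xlm-roberta-xl;
# OnnxConfig takes the model config plus an optional task name.
if __name__ == "__main__":
    config = XLMRobertaXLConfig(num_hidden_layers=2)
    onnx_config = XLMRobertaXLOnnxConfig(config, task="default")
    print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes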
| 21 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowerCAmelCase : Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__lowerCAmelCase : Union[str, Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
__lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
__lowerCAmelCase : List[str] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowerCAmelCase : str = os.path.join(self.tmpdirname , __snake_case )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__snake_case , __snake_case )
def SCREAMING_SNAKE_CASE ( self : Dict , **lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def SCREAMING_SNAKE_CASE ( self : List[Any] , **lowerCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__snake_case )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase : Tuple = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
__lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__snake_case )
__lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __snake_case )
self.assertIsInstance(processor_fast.tokenizer , __snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __snake_case )
self.assertIsInstance(processor_fast.image_processor , __snake_case )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowerCAmelCase : Dict = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
__lowerCAmelCase : Tuple = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_image_processor()
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : Dict = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : Union[str, Any] = image_processor(__snake_case , return_tensors="""np""" )
__lowerCAmelCase : Any = processor(images=__snake_case , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
__lowerCAmelCase : Any = """lower newer"""
__lowerCAmelCase : Tuple = processor(text=__snake_case )
__lowerCAmelCase : Dict = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
__lowerCAmelCase : List[Any] = """lower newer"""
__lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCAmelCase : Optional[Any] = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : int = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
__lowerCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : Optional[Any] = processor.batch_decode(__snake_case )
__lowerCAmelCase : Dict = tokenizer.batch_decode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=__snake_case , image_processor=__snake_case )
__lowerCAmelCase : Optional[int] = """lower newer"""
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : int = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
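# Hedged end-to-end sketch of the round trip these tests exercise, using a
# public checkpoint instead of the temp-dir fixtures (kept as comments so the
# test module stays side-effect free at import time):
# from transformers import CLIPProcessor
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# batch = processor(text=["lower newer"], images=image, return_tensors="np")
# batch keys: input_ids, attention_mask, pixel_values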
| 651 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
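# Example invocation (fire exposes the function's parameters as CLI flags; the
# script file name below is hypothetical):
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en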
| 21 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
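# Hedged usage sketch (an addition; the checkpoint name and the output key
# names are assumptions about public Pix2Struct checkpoints):
# from transformers import Pix2StructProcessor
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# batch = processor(images=image, return_tensors="pt", max_patches=2048)
# expected keys: flattened_patches, attention_mask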
| 650 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
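# Worked check (added): 49/98 is the classic digit-cancelling fraction, since
# 49/98 == 4/8 after dropping the shared 9; the four non-trivial two-digit
# fractions multiply to 1/100, so solution() evaluates to 100 (Project Euler 33).
assert is_digit_cancelling(49, 98)
assert solution() == 100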
| 21 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    # exact GELU: x * Phi(x), computed via erf
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    # tanh approximation of GELU
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    # clip the range of possible GELU outputs to [-10, 10]
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    # Gated Linear Unit: split in half along `axis`, gate one half with the other
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
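# Quick demonstration of the lookup helper above (added example):
if __name__ == "__main__":
    xs = tf.constant([-1.0, 0.0, 1.0])
    print(get_tf_activation("gelu_fast")(xs))
    print(get_tf_activation("mish")(xs))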
| 696 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math.pi value is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # semicircle of radius 2: its integral over [0, 2] equals pi
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
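# Added worked example: the integral of x**2 over [0, 1] is exactly 1/3, so the
# estimator should land near 0.333 for a large sample count.
def square_integral_demo(iterations: int = 100_000) -> float:
    return area_under_curve_estimator(iterations, lambda x: x * x, 0.0, 1.0)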
| 21 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
    def to_dict(self):
        # serialize, replacing any GenerationConfig value with its dict form for JSON support
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
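# Hedged usage sketch (an addition): how these arguments are typically built
# for a Seq2SeqTrainer run; output_dir is required by the parent class.
# args = Seq2SeqTrainingArguments(
#     output_dir="out",
#     predict_with_generate=True,
#     generation_max_length=64,
#     generation_num_beams=4,
# )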
| 673 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
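# Hedged usage sketch (an addition; requires keras-nlp and tensorflow-text):
# tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
# batch = tf_tokenizer(tf.constant(["hello world"]))
# batch["input_ids"], batch["attention_mask"]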
| 21 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return 12
@property
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__snake_case )
@property
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
A__ = 12
A__ = 12
A__ = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
A__ = TransformeraDModel(**__snake_case )
return model
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = """cpu"""
A__ = self.dummy_vqvae
A__ = self.dummy_text_encoder
A__ = self.dummy_tokenizer
A__ = self.dummy_transformer
A__ = VQDiffusionScheduler(self.num_embed )
A__ = LearnedClassifierFreeSamplingEmbeddings(learnable=__snake_case )
A__ = VQDiffusionPipeline(
vqvae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , transformer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
A__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
A__ = """teddy bear playing in the pool"""
A__ = torch.Generator(device=__snake_case ).manual_seed(0 )
A__ = pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type="np" )
A__ = output.images
A__ = torch.Generator(device=__snake_case ).manual_seed(0 )
A__ = pipe(
[prompt] , generator=__snake_case , output_type="np" , return_dict=__snake_case , num_inference_steps=2 )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A__ = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = """cpu"""
A__ = self.dummy_vqvae
A__ = self.dummy_text_encoder
A__ = self.dummy_tokenizer
A__ = self.dummy_transformer
A__ = VQDiffusionScheduler(self.num_embed )
A__ = LearnedClassifierFreeSamplingEmbeddings(
learnable=__snake_case , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
A__ = VQDiffusionPipeline(
vqvae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , transformer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
A__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
A__ = """teddy bear playing in the pool"""
A__ = torch.Generator(device=__snake_case ).manual_seed(0 )
A__ = pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type="np" )
A__ = output.images
A__ = torch.Generator(device=__snake_case ).manual_seed(0 )
A__ = pipe(
[prompt] , generator=__snake_case , output_type="np" , return_dict=__snake_case , num_inference_steps=2 )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A__ = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
A__ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
A__ = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
A__ = torch.Generator(device=__snake_case ).manual_seed(0 )
A__ = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=__snake_case , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
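# Hedged inference sketch matching the slow test above (same checkpoint and
# call signature the test exercises; kept as comments inside the test module):
# pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
# out = pipe("teddy bear playing in the pool", num_images_per_prompt=1,
#            generator=torch.manual_seed(0), output_type="np")
# image = out.images[0]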
| 514 |
import math
import tensorflow as tf
from packaging import version
def _gelu ( x ):
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new ( x ):
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish ( x ):
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast ( x ):
    x = tf.convert_to_tensor(x )
    coeff_a = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    coeff_b = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x) ))
def quick_gelu ( x ):
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_10 ( x ):
    # GELU with outputs clipped to [-10, 10]
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu ( x , axis=-1 ):
    # Gated linear unit: split along `axis` and gate one half with the other.
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def approximate_gelu_wrap ( x ):
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation ( activation_string ):
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
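# A minimal usage sketch (assumes TensorFlow is available): look up an
# activation by its string name and apply it to a small, made-up tensor.
if __name__ == "__main__":
    activation = get_tf_activation("gelu")
    print(activation(tf.constant([-1.0, 0.0, 1.0])).numpy())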
| 21 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
    def time_input_dim( self ):
'''simple docstring'''
return 32
@property
    def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
'''simple docstring'''
return 1_00
@property
    def dummy_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
@property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0  # zero out a region to inpaint (the exact slice was lost in the dump; this is a reconstruction)
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0  # zero out a band to inpaint (slice reconstructed; the dump lost it)
        prompt = """a hat"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
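# A small standalone sketch of the float-mask convention built in the tests
# above: start from ones and zero out the region to repaint (the slice below
# is illustrative, not the test's exact region).
if __name__ == "__main__":
    demo_mask = np.ones((64, 64), dtype=np.float32)
    demo_mask[:16, :16] = 0
    print(int(demo_mask.sum()))  # 64*64 - 16*16 = 3840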
| 666 |
from collections.abc import Sequence
def max_subsequence_sum ( nums = None ):
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        # Keep the running best: start fresh at num, extend with num, or keep ans.
        ans = max(num , ans + num , ans )
    return ans
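# Quick sanity sketch on a made-up input: the best (not necessarily
# contiguous) subsequence keeps every positive term, so the answer is 12.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12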
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
def binomial_coefficient (n : int , k : int ):
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number (node_count : int ):
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial (n : int ):
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count (node_count : int ):
    return catalan_number(node_count ) * factorial(node_count )
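# Quick sanity sketch: 3 nodes give catalan_number(3) == 5 BST shapes and
# binary_tree_count(3) == 5 * 3! == 30 distinct labelled binary trees.
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30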
if __name__ == "__main__":
    node_count = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 403 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation :
    languages : List[str]
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="""Translation""" , init=False , repr=False )
    def __call__( self ):
        '''simple docstring'''
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self ):
        '''simple docstring'''
        from .features import Value
        return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages :
    languages : Optional[List] = None
    num_languages : Optional[int] = None
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="""TranslationVariableLanguages""" , init=False , repr=False )
    def __post_init__( self ):
        '''simple docstring'''
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
    def __call__( self ):
        '''simple docstring'''
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ):
        '''simple docstring'''
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self ):
        '''simple docstring'''
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
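# A minimal usage sketch of the encoder above (example strings are made up):
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["de", "en"])
    print(feature.encode_example({"de": "Hallo", "en": ["Hello", "Hi"]}))
    # -> {'language': ('de', 'en', 'en'), 'translation': ('Hallo', 'Hello', 'Hi')}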
| 21 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __lowerCamelCase ( unittest.TestCase ):
    def test_cpu( self ):
debug_launcher(test_script.main )
    def test_ops( self ):
debug_launcher(test_ops.main )
| 29 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute( self , predictions , references , sample_weight=None ):
        '''simple docstring'''
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
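# A standalone sketch of the underlying sklearn call, reusing the labels from
# the examples docstring above:
if __name__ == "__main__":
    print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54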
| 21 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator ( length = 8 ):
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator ( chars_incl , i ):
    '''simple docstring'''
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random ( chars_incl , quantity ):
    '''simple docstring'''
    return "".join(secrets.choice(chars_incl ) for _ in range(quantity ) )
def random_number ( chars_incl , quantity ):
    '''simple docstring'''
    pass # Put your code here...
def random_letters ( chars_incl , quantity ):
    '''simple docstring'''
    pass # Put your code here...
def random_characters ( chars_incl , quantity ):
    '''simple docstring'''
    pass # Put your code here...
def is_safe ( password , min_length = 8 ):
    '''simple docstring'''
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main ():
    '''simple docstring'''
    max_length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(max_length ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , max_length ) , )
    print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
    main()
| 553 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan ( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan ( x , model ):
    z , _ , _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str ( string , reload=False ):
    module , cls = string.rsplit(""".""" , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config ( config ):
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""" )
    return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def load_model_from_config ( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model ( config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step
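# A minimal sketch of the `instantiate_from_config` convention above; the
# target string points at a stdlib class purely for illustration.
if __name__ == "__main__":
    example_config = {"target": "collections.OrderedDict", "params": {}}
    print(type(instantiate_from_config(example_config)))  # <class 'collections.OrderedDict'>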
| 21 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings ( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__(self , learnable , hidden_size = None , length = None ):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline ( DiffusionPipeline ):
    """simple docstring"""
    vqvae : VQModel
    transformer : Transformer2DModel
    text_encoder : CLIPTextModel
    tokenizer : CLIPTokenizer
    scheduler : VQDiffusionScheduler
    learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
vqvae=__snake_case , transformer=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : List[str] = len(__snake_case ) if isinstance(__snake_case , __snake_case ) else 1
# get prompt text embeddings
snake_case_ : str = self.tokenizer(
__snake_case , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ : Optional[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate text embeddings for each generation per prompt
snake_case_ : Any = prompt_embeds.repeat_interleave(__snake_case , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ : Union[str, Any] = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ : Optional[Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(__snake_case , 1 , 1 )
else:
snake_case_ : Any = [""""""] * batch_size
snake_case_ : List[str] = text_input_ids.shape[-1]
snake_case_ : Tuple = self.tokenizer(
__snake_case , padding="""max_length""" , max_length=__snake_case , truncation=__snake_case , return_tensors="""pt""" , )
snake_case_ : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ : Any = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Any = negative_prompt_embeds.shape[1]
snake_case_ : str = negative_prompt_embeds.repeat(1 , __snake_case , 1 )
snake_case_ : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__(self , lowercase__ , lowercase__ = 1_00 , lowercase__ = 5.0 , lowercase__ = 1.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , lowercase__ = None , lowercase__ = 1 , ):
if isinstance(__snake_case , __snake_case ):
snake_case_ : str = 1
elif isinstance(__snake_case , __snake_case ):
snake_case_ : List[str] = len(__snake_case )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__snake_case )}' )
snake_case_ : List[str] = batch_size * num_images_per_prompt
snake_case_ : Dict = guidance_scale > 1.0
snake_case_ : Union[str, Any] = self._encode_prompt(__snake_case , __snake_case , __snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__snake_case )}.' )
# get the initial completely masked latents unless the user supplied it
snake_case_ : List[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ : List[Any] = self.transformer.num_vector_embeds - 1
snake_case_ : Union[str, Any] = torch.full(__snake_case , __snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents have to be valid embedding indices i.e. in the range 0,"""
                    f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
snake_case_ : Dict = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
snake_case_ : Optional[Any] = self.scheduler.timesteps.to(self.device )
snake_case_ : List[Any] = latents
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ : List[str] = self.transformer(__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case ).sample
if do_classifier_free_guidance:
snake_case_ : List[str] = model_output.chunk(2 )
snake_case_ : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__snake_case , dim=1 , keepdim=__snake_case )
snake_case_ : Any = self.truncate(__snake_case , __snake_case )
# remove `log(0)`'s (`-inf`s)
snake_case_ : Union[str, Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Union[str, Any] = self.scheduler.step(__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
snake_case_ : Tuple = self.vqvae.config.vq_embed_dim
snake_case_ : str = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ : Optional[int] = self.vqvae.quantize.get_codebook_entry(__snake_case , shape=__snake_case )
snake_case_ : Any = self.vqvae.decode(__snake_case , force_not_quantize=__snake_case ).sample
snake_case_ : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Union[str, Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
    def truncate(self , log_p_x_0 , truncation_rate ):
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf # -inf = log(0)
        return rv
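# A standalone sketch of the truncation trick in `truncate` above: keep the
# most probable classes whose cumulative probability stays under the rate,
# always retaining the single most likely class (toy numbers, one pixel).
if __name__ == "__main__":
    log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.2]]]))  # (batch, classes, pixels)
    sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < 0.6
    keep = torch.cat((torch.full_like(keep[:, :1, :], True), keep), dim=1)[:, :-1, :]
    print(keep.gather(1, indices.argsort(1)).squeeze(-1))  # tensor([[ True,  True, False]])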
| 480 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
    def test_cpu( self ):
'''simple docstring'''
debug_launcher(test_script.main )
    def test_ops( self ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mra'''
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 108 |
red = 0 # The first color of the flag.
white = 1 # The second color of the flag.
blue = 2 # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort ( sequence ):
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low] , sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid] , sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg )
    return sequence
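# Quick sanity sketch on a made-up sequence of flag colours:
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]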
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster (vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("""float64""" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("""int32""" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("""float""" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("""float""" , [dim] )
        vb = tf.placeholder("""float""" , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("""float""" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 1_0_0
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
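# A minimal usage sketch (assumes a TF 1.x runtime, matching the Session /
# placeholder API used above; the toy vectors are made up):
if __name__ == "__main__":
    toy_vectors = [array([1.0, 1.0]), array([1.2, 0.8]), array([8.0, 8.0]), array([7.8, 8.1])]
    centroids, assignments = TFKMeansCluster(toy_vectors, 2)
    print(centroids, assignments)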
| 651 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler ( SchedulerMixin , ConfigMixin ):
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps=20_00 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ):
        '''simple docstring'''
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        '''simple docstring'''
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self ):
        '''simple docstring'''
        return self.config.num_train_timesteps
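# A minimal usage sketch (assumes `diffusers` is installed): create the
# scheduler, set a short discretisation, and inspect it.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(10)
    print(len(scheduler), scheduler.timesteps)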
| 21 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian ( x , mu = 0.0 , sigma = 1.0 ):
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
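# Quick sanity sketch: the standard normal density at x=0 is 1/sqrt(2*pi).
assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12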
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
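# A standalone sketch of the right-side padding math used by `_pad` above
# (feature_size 1; the lengths are illustrative): pad with the padding value
# and extend the attention mask with zeros.
if __name__ == "__main__":
    required_input = np.array([0.1, 0.2, 0.3])
    difference = 5 - len(required_input)
    print(np.pad(required_input, (0, difference), "constant", constant_values=0.0))
    print(np.pad(np.ones(len(required_input), dtype=np.int32), (0, difference)))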
| 21 | 0 |
def different_signs ( num_a , num_b ):
    # Two integers have opposite signs exactly when their XOR is negative.
    return num_a ^ num_b < 0
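# Quick sanity sketch:
assert different_signs(1, -1) and not different_signs(1, 1)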
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest ( nn.Module ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward( self , x ):
        '''simple docstring'''
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook ( ModelHook ):
    def pre_forward( self , module , *args , **kwargs ):
        '''simple docstring'''
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook ( ModelHook ):
    def post_forward( self , module , output ):
        '''simple docstring'''
        return output + 1
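# A minimal standalone sketch: a PreForwardHook shifts the input by one before
# the wrapped forward runs, so the hooked model matches a manual `x + 1` call.
if __name__ == "__main__":
    demo_model = ModelForTest()
    demo_x = torch.randn(2, 3)
    expected = demo_model(demo_x + 1)
    add_hook_to_module(demo_model, PreForwardHook())
    assert torch.allclose(demo_model(demo_x), expected, atol=1e-5)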
class HooksModelTester ( unittest.TestCase ):
    def test_add_and_remove_hook( self ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
    def test_append_and_remove_hooks( self ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
    def test_pre_forward_hook_is_executed( self ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
    def test_post_forward_hook_is_executed( self ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
    def test_no_grad_in_hook( self ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
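# Illustrative sketch (an addition, not part of the original test suite): the
# Pre/Post forward hooks exercised above follow the accelerate ModelHook
# protocol, whose relevant callbacks are pre_forward(module, *args, **kwargs)
# and post_forward(module, output). This assumes ModelHook is imported at the
# top of this module (it is instantiated above); the class names below are
# hypothetical examples, kept distinct from the real hooks.
class _ExamplePreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Shift the first positional input by one before the wrapped forward runs.
        return (args[0] + 1,) + args[1:], kwargs


class _ExamplePostForwardHook(ModelHook):
    def post_forward(self, module, output):
        # Shift the output by one after the wrapped forward runs.
        return output + 1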
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
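# Quick sanity check (an addition, not part of the original module): on the
# cosine schedule the betas grow from near zero toward the max_beta clip of
# 0.999 at the end of the schedule. Defined only; never called at import time.
def _check_cosine_betas():
    betas = betas_for_alpha_bar(1000)
    assert float(betas[0]) < float(betas[-1]) <= 0.999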
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample, timestep=None):
        # UnCLIP does not rescale model inputs; the sample is returned unchanged.
        return sample
    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
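# Illustrative usage sketch (an addition, not part of the original module):
# how set_timesteps() and step() compose into a denoising loop. The `unet`
# argument is a hypothetical callable returning a noise prediction of the
# same shape as the sample. Defined only; never called at import time.
def _example_denoising_loop(unet, num_inference_steps=25):
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps, device="cpu")
    sample = torch.randn(1, 3, 64, 64)
    for t in scheduler.timesteps:
        model_output = unet(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample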
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask: zero out the top-left square so it gets inpainted
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_files2failed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
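# For reference (an assumption inferred from the fields read above, not part
# of the original script): each *.log file is expected to contain pytest
# report-log style JSON lines, one object per line, for example as produced by
# `pytest --report-log=<name>.log`:
#
#   {"nodeid": "tests/test_hooks.py::HooksModelTester::test_append_and_remove",
#    "duration": 0.0123, "outcome": "failed"}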
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
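# Illustrative helper (an addition, not part of the upstream script): shows the
# fused-qkv split performed above on a toy tensor. A fused in_proj_weight of
# shape (3 * hidden_size, hidden_size) is cut into equal q/k/v blocks along
# dim 0. Defined only; never called at import time.
def _demo_qkv_split(hidden_size=4):
    fused = torch.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : 2 * hidden_size, :]
    v = fused[-hidden_size:, :]
    # The three equal blocks reassemble into the original fused projection.
    assert torch.equal(torch.cat([q, k, v]), fused)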
def decoder_config_from_checkpoint(checkpoint):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
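# Example invocation (an addition; the script filename and output path below
# are placeholders, assuming this file is saved as convert_musicgen.py):
#
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small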
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
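# Illustrative usage (an addition, not part of the original module): build the
# default config and inspect the ONNX input spec; no pretrained weights are
# loaded. Defined only; never called at import time.
def _example_segformer_config():
    config = SegformerConfig()
    onnx_config = SegformerOnnxConfig(config)
    return config.hidden_sizes, list(onnx_config.inputs)  # ([32, 64, 160, 256], ['pixel_values'])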
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCAmelCase__ , features=UpperCAmelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE = pa.ipc.open_stream(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = f.read_all()
SCREAMING_SNAKE_CASE = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCAmelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ , hash_salt="split_name" , check_duplicates=UpperCAmelCase__ , ) as writer:
with pytest.raises(UpperCAmelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] )
def __lowerCamelCase (UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ , hash_salt="split_name" , check_duplicates=UpperCAmelCase__ , ) as writer:
with pytest.raises(UpperCAmelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=1_0 )
writer.write({"col_1": "bar", "col_2": 2} , key=1_0 )
SCREAMING_SNAKE_CASE = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] )
def __lowerCamelCase (UpperCAmelCase__ : List[Any] ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ , hash_salt="split_name" , check_duplicates=UpperCAmelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase (UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ():
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCAmelCase__ , schema=pa.schema(UpperCAmelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCAmelCase__ , 1 )
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCAmelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase (UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = """mock://dataset-train.arrow"""
with ArrowWriter(path=UpperCAmelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCAmelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCAmelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
SCREAMING_SNAKE_CASE = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE = pq.read_table(UpperCAmelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict ):
import PIL.Image
SCREAMING_SNAKE_CASE = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCAmelCase__ , format="png" )
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCAmelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCAmelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE = pq.read_table(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCAmelCase__ )
with open(UpperCAmelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCAmelCase__ )] )
SCREAMING_SNAKE_CASE = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCAmelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCAmelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
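# --- Usage sketch (added; not part of the test module). The core pattern the
# tests above exercise: stream dict examples through an ArrowWriter, then read
# the Arrow IPC stream back with pyarrow.
#
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       num_examples, num_bytes = writer.finalize()
#   table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()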
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APX-Algorithm for min Vertex Cover
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    # queue used to store nodes and their rank
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacent list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
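# --- Usage sketch (added; not part of the original module). Reading plain-text
# files line by line into a Dataset through this reader (paths illustrative):
#
#   reader = TextDatasetReader({"train": "./corpus.txt"}, split=NamedSplit("train"))
#   ds = reader.read()
#   print(ds[0]["text"])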
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    # writing the terms as a(i) = b * 10^k + c, memoize how the low-order
    # digit block c evolves for a given digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
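# --- Usage sketch (added): this command is normally invoked through the
# `transformers-cli` entry point, e.g. (paths illustrative):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin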
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to the key handler register"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
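# --- Usage sketch (added; class and key names illustrative). A class built via
# `register` gains a `handle_input` classmethod that reads one key press and
# dispatches to whichever method was marked with that key code:
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
#       @mark_multiple(KEYMAP["enter"], KEYMAP["newline"])
#       def select(cls):
#           ...
#
#   Menu.handle_input()  # blocks for one key press, then calls the handler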
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
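# --- Added note: the mixin-driven tests above run against every class in
# all_model_classes; the two @slow integration tests additionally compare
# output slices against reference values computed from the PyTorch checkpoint
# (hence from_pt=True when loading).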
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
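# --- Usage sketch (added): the jsonlines cache written above is consumed later
# during training; each record holds one tokenized window plus its labels
# (file name illustrative):
#
#   with jsonlines.open("nq-validation.jsonl") as reader:
#       record = next(iter(reader))
#       record["input_ids"], record["start_token"], record["end_token"], record["category"]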
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
'''simple docstring'''
@property
def lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_UpperCAmelCase = {0: """batch"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_UpperCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_UpperCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_UpperCAmelCase = self.num_layers
for i in range(__snake_case ):
_UpperCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_UpperCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = super().outputs
else:
_UpperCAmelCase = super(__snake_case , self ).outputs
if self.use_past:
_UpperCAmelCase = self.num_layers
for i in range(__snake_case ):
_UpperCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCamelCase ( self : List[Any] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Generate decoder inputs
_UpperCAmelCase = seq_length if not self.use_past else 1
_UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
_UpperCAmelCase = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_UpperCAmelCase = dict(**__snake_case , **__snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_UpperCAmelCase = common_inputs["""input_ids"""].shape
_UpperCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
_UpperCAmelCase = self.num_attention_heads
_UpperCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase = decoder_seq_length + 3
_UpperCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_UpperCAmelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 )
_UpperCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_UpperCAmelCase = self.num_layers
_UpperCAmelCase = min(__snake_case , __snake_case )
_UpperCAmelCase = max(__snake_case , __snake_case ) - min_num_layers
_UpperCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
) )
# TODO: test this.
_UpperCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__snake_case , __snake_case ):
common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
return common_inputs
def lowerCamelCase ( self : List[str] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_UpperCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_UpperCAmelCase = seqlen + 2
_UpperCAmelCase = self.num_layers
_UpperCAmelCase = self.num_attention_heads
_UpperCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase = common_inputs["""attention_mask"""].dtype
_UpperCAmelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
_UpperCAmelCase = [
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
]
return common_inputs
def lowerCamelCase ( self : Any , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = tokenizer.num_special_tokens_to_add(__snake_case )
_UpperCAmelCase = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_UpperCAmelCase = dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
return common_inputs
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> int:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
elif self.task == "causal-lm":
_UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
else:
_UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
return common_inputs
def lowerCamelCase ( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Any ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
else:
_UpperCAmelCase = super(__snake_case , self )._flatten_past_key_values_(
                __snake_case , __snake_case , __snake_case , __snake_case )
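# --- Usage sketch (added; not part of the original module):
#
#   from transformers import BlenderbotSmallConfig
#
#   config = BlenderbotSmallConfig()   # 8 encoder/decoder layers, d_model=512
#   assert config.model_type == "blenderbot-small"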
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
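# A small usage check for the Viterbi implementation above, using the textbook
# healthy/fever HMM (probabilities shown only as an illustration).
observations = ["normal", "cold", "dizzy"]
states = ["healthy", "sick"]
start_p = {"healthy": 0.6, "sick": 0.4}
trans_p = {
    "healthy": {"healthy": 0.7, "sick": 0.3},
    "sick": {"healthy": 0.4, "sick": 0.6},
}
emit_p = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))
# -> ['healthy', 'healthy', 'sick']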
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config


def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)


def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param


def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict


def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257,
            n_positions=1_024,
            n_embd=1_024,
            n_layer=24,
            n_head=16,
            n_inner=4_096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50_256,
            eos_token_id=50_256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
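# A quick, standalone illustration (hypothetical sizes) of what
# fix_query_key_value_ordering does for checkpoint_version >= 2.0: Megatron
# stores the fused QKV weight grouped as [num_heads, num_splits, head_size, ...]
# and the conversion regroups it to [num_splits, num_heads, head_size, ...].
import torch

num_heads, num_splits, head_size, cols = 4, 3, 8, 16
param = torch.arange(num_heads * num_splits * head_size * cols, dtype=torch.float32).view(
    num_heads * num_splits * head_size, cols
)
saved_shape = (num_heads, num_splits, head_size) + param.size()[1:]
out = param.view(*saved_shape).transpose(0, 1).contiguous().view(*param.size())
assert out.shape == param.shape  # same shape, row blocks permuted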
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
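# Sanity checks for the digit-cancelling logic above: 49/98 "cancels" the 9s
# and still equals 4/8, and Project Euler 33's product of the four non-trivial
# fractions reduces to 1/100, so solution() returns that denominator.
assert is_digit_cancelling(49, 98)
assert solution() == 100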
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
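# A minimal end-to-end sketch of the pyarrow calls the builder above relies on:
# write a tiny parquet file, then stream it back in record batches. The file
# name is arbitrary.
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
pq.write_table(table, "tiny.parquet")
parquet_file = pq.ParquetFile("tiny.parquet")
for record_batch in parquet_file.iter_batches(batch_size=2, columns=["col_1"]):
    print(pa.Table.from_batches([record_batch]).to_pydict())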
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
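# Usage sketch for the estimator above (the sample count is arbitrary):
# integrating x**2 over [0, 1] should come out near 1/3.
from random import seed

seed(0)  # make the run repeatable
approx = area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
print(f"Estimate of the integral of x^2 on [0, 1]: {approx} (exact: {1 / 3})")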
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = sorted(numsa + numsa )
UpperCAmelCase = divmod(len(lowerCAmelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ : Union[str, Any] = [float(x) for x in input('''Enter the elements of first array: ''').split()]
lowerCAmelCase_ : Dict = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
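# Quick check of the median helper above on an odd- and an even-length merge.
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5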
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # a(i) is written as b * 10^k + c; ds_b is digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
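# The sequence above is a(1) = 1 and a(n) = a(n-1) + digitsum(a(n-1)).
# A naive reference (fine for small n) to cross-check the memoized solver,
# assuming the restoration above is faithful:
def naive_a(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(d) for d in str(term))
    return term


assert naive_a(6) == 23  # 1, 2, 4, 8, 16, 23
print(solution(10), naive_a(10))  # both should print 62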
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
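# A small numeric comparison of the activations above (TensorFlow required;
# the input values are arbitrary). The erf-based GELU and its tanh
# approximation should be very close on this range.
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
print(_gelu(x).numpy())      # "exact" erf-based GELU
print(_gelu_new(x).numpy())  # tanh approximation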
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
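# Kadane-style check for max_subsequence_sum above: the best contiguous run of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] with sum 6.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subsequence_sum([5]) == 5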
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
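# Encoding sketch for TranslationVariableLanguages above: one language may map
# to several alternative translations, which get flattened into parallel lists
# sorted by language code.
feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
# -> {'language': ('de', 'en', 'fr', 'fr'),
#     'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}
print(encoded)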
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase ( lowerCAmelCase__="ro" ,lowerCAmelCase__="en" ,lowerCAmelCase__="wmt16" ,lowerCAmelCase__=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
lowerCamelCase_ = f"{src_lang}-{tgt_lang}"
print(f"Converting {dataset}-{pair}" )
lowerCamelCase_ = datasets.load_dataset(lowerCAmelCase__ ,lowerCAmelCase__ )
if save_dir is None:
lowerCamelCase_ = f"{dataset}-{pair}"
lowerCamelCase_ = Path(lowerCAmelCase__ )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
for split in ds.keys():
print(f"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
lowerCamelCase_ = """val""" if split == """validation""" else split
lowerCamelCase_ = save_dir.joinpath(f"{fn}.source" )
lowerCamelCase_ = save_dir.joinpath(f"{fn}.target" )
lowerCamelCase_ = src_path.open('''w+''' )
lowerCamelCase_ = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowerCamelCase_ = x["""translation"""]
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
from sklearn.metrics import matthews_corrcoef

import datasets

_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"

_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
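# Direct sklearn check of the statistic wrapped above, on the same toy labels
# as the metric's own docstring example:
from sklearn.metrics import matthews_corrcoef

refs = [1, 3, 2, 0, 3, 2]
preds = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(refs, preds), 2))  # 0.54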
"""simple docstring"""
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
lowerCAmelCase :Tuple = str(bin(a__ ) )
binary_number += "0" * shift_amount
return binary_number
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
lowerCAmelCase :List[Any] = str(bin(a__ ) )[2:]
if shift_amount >= len(a__ ):
return "0b0"
lowerCAmelCase :Tuple = binary_number[: len(a__ ) - shift_amount]
return "0b" + shifted_binary_number
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
lowerCAmelCase :List[str] = """0""" + str(bin(a__ ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
lowerCAmelCase :str = len(bin(a__ )[3:] ) # Find 2's complement of number
lowerCAmelCase :List[Any] = bin(abs(a__ ) - (1 << binary_number_length) )[3:]
lowerCAmelCase :Union[str, Any] = (
"""1""" + """0""" * (binary_number_length - len(a__ )) + binary_number
)
if shift_amount >= len(a__ ):
return "0b" + binary_number[0] * len(a__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(a__ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 553 |
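# Spot checks for the three shifts above (the string-of-bits convention of the
# functions is kept):
assert logical_left_shift(1, 1) == "0b10"
assert logical_right_shift(8, 2) == "0b10"
assert arithmetic_right_shift(17, 2) == "0b000100"  # 17 >> 2 == 4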
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
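# get_obj_from_str above resolves a dotted path to a Python object; a
# dependency-free illustration using the standard library:
cls = get_obj_from_str("collections.OrderedDict")
print(cls())  # OrderedDict()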
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
snake_case_ : int = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
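# check_polygon above is the generalized triangle inequality: the longest side
# must be strictly shorter than the sum of all the others.
assert check_polygon([6, 10, 5]) is True
assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2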
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
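# The "postprocess model score" step above divides by the VP-SDE marginal
# std sigma(t) = sqrt(1 - exp(2 * log_mean_coeff)). A quick standalone check
# of that formula using the scheduler's default beta range:
import torch

beta_min, beta_max = 0.1, 20.0
t = torch.tensor(0.5)
log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
print(std)  # marginal noise std at t = 0.5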
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 21 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
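# A minimal round-trip sketch with the same public API the tests above
# exercise; the directory argument and column values are illustrative.
from datasets import Dataset

def _demo_parquet_roundtrip(tmp_dir: str) -> None:
    ds = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    )
    ds.to_parquet(f"{tmp_dir}/demo.parquet")
    reloaded = Dataset.from_parquet(f"{tmp_dir}/demo.parquet")
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]
    assert reloaded.num_rows == 4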
| 651 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler, following the score_sde_pytorch reference above."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
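# For reference, `step_pred` above is one Euler-Maruyama step of the
# reverse-time VP-SDE with step size dt = -1/N:
#
#   beta(t) = beta_min + t * (beta_max - beta_min)
#   drift   = -0.5 * beta(t) * x - beta(t) * score
#   x_mean  = x + drift * dt
#   x       = x_mean + sqrt(beta(t)) * sqrt(-dt) * noise
#
# where `score` has already been rescaled by -1/std of the VP perturbation
# kernel, std(t) = sqrt(1 - exp(2 * log_mean_coeff(t))).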
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple , snake_case : List[Any] = None , snake_case : Optional[int] = None )-> int:
if start is None:
_lowerCamelCase = 0
if end is None:
_lowerCamelCase = len(snake_case ) - 1
if start >= end:
return
_lowerCamelCase = (start + end) // 2
slowsort(snake_case , snake_case , snake_case )
slowsort(snake_case , mid + 1 , snake_case )
if sequence[end] < sequence[mid]:
_lowerCamelCase = sequence[mid], sequence[end]
slowsort(snake_case , snake_case , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
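# Slowsort is a deliberately pessimal "multiply and surrender" sort; it sorts
# in place and returns None. A quick illustrative check:
def _demo_slowsort() -> None:
    data = [5, 3, 1, 4, 2]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]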
| 650 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ):
        # If we have a list of dicts, convert it into a dict of lists so this
        # method can be used as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
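# A usage sketch for `pad` above: a concrete subclass only needs to define
# `model_input_names`. The toy class, its values, and the expected shapes are
# illustrative assumptions, not part of the original file.
class ToyFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values"]

def _demo_pad() -> None:
    extractor = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = extractor.pad(
        {"input_values": [np.array([1.0, 2.0, 3.0]), np.array([4.0])]},
        padding="longest",
        return_tensors="np",
    )
    assert batch["input_values"].shape == (2, 3)  # second row padded with 0.0
    assert batch["attention_mask"].tolist() == [[1, 1, 1], [1, 0, 0]]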
| 21 | 0 |
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack: fills the global dp table `f` lazily."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
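# For contrast with the table-based versions above, a top-down variant with
# memoization via functools.lru_cache (illustrative, not part of the original
# file). Taking items 3 and 4 (weights 2 + 3 <= 6) gives value 4 + 4 = 8.
from functools import lru_cache

def knapsack_recursive(w: int, wt: list, val: list) -> int:
    @lru_cache(maxsize=None)
    def best(i: int, cap: int) -> int:
        if i == 0 or cap == 0:
            return 0
        skip = best(i - 1, cap)
        if wt[i - 1] > cap:
            return skip
        return max(skip, best(i - 1, cap - wt[i - 1]) + val[i - 1])

    return best(len(wt), w)

assert knapsack_recursive(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8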
| 696 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
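# A compact sketch of the hook mechanism the tests above exercise:
# ModelHook.pre_forward may rewrite args/kwargs before the wrapped forward
# runs. This reuses the imports at the top of this test file; the hook class
# and shapes are illustrative.
class AddOneHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

def _demo_hooks() -> None:
    layer = nn.Linear(3, 3)
    add_hook_to_module(layer, AddOneHook())
    out = layer(torch.zeros(1, 3))  # the forward actually sees ones, not zeros
    remove_hook_from_module(layer)
    assert out.shape == (1, 3)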
| 21 | 0 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : str = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
print("""Loading config file...""" )
def flatten_yaml_as_dict(lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase="." ):
UpperCAmelCase = []
for k, v in d.items():
UpperCAmelCase = parent_key + sep + k if parent_key else k
if isinstance(lowerCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowerCAmelCase , lowerCAmelCase , sep=lowerCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(lowerCAmelCase )
UpperCAmelCase = argparse.Namespace()
with open(lowerCAmelCase , """r""" ) as yaml_file:
try:
UpperCAmelCase = yaml.load(lowerCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase = flatten_yaml_as_dict(lowerCAmelCase )
for k, v in flat_cfg.items():
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(lowerCAmelCase , str(lowerCAmelCase ) ) )
return config
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = MobileViTVaConfig()
UpperCAmelCase = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
UpperCAmelCase = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
UpperCAmelCase = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
UpperCAmelCase = 151
UpperCAmelCase = 512
UpperCAmelCase = """ade20k-id2label.json"""
UpperCAmelCase = True
elif task_name.startswith("""voc_""" ):
UpperCAmelCase = 21
UpperCAmelCase = 512
UpperCAmelCase = """pascal-voc-id2label.json"""
UpperCAmelCase = True
# orig_config
UpperCAmelCase = load_orig_config_file(lowerCAmelCase )
assert getattr(lowerCAmelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase = getattr(lowerCAmelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(lowerCAmelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase = getattr(lowerCAmelCase , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase = getattr(lowerCAmelCase , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase = getattr(lowerCAmelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
UpperCAmelCase = getattr(lowerCAmelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
UpperCAmelCase = getattr(lowerCAmelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
UpperCAmelCase = """huggingface/label-files"""
UpperCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = dct.pop(lowerCAmelCase )
UpperCAmelCase = val
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=False ):
'''simple docstring'''
if base_model:
UpperCAmelCase = """"""
else:
UpperCAmelCase = """mobilevitv2."""
UpperCAmelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase = k[8:]
else:
UpperCAmelCase = k
if ".block." in k:
UpperCAmelCase = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
UpperCAmelCase = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
UpperCAmelCase = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
UpperCAmelCase = k_new.replace("""conv_1.""" , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
UpperCAmelCase = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
UpperCAmelCase = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
UpperCAmelCase = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase = [0, 1]
elif i == 4:
UpperCAmelCase = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
UpperCAmelCase = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
UpperCAmelCase = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
UpperCAmelCase = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
UpperCAmelCase = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
UpperCAmelCase = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
UpperCAmelCase = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
UpperCAmelCase = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
UpperCAmelCase = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(lowerCAmelCase )
for k in keys_to_ignore:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = get_mobilevitva_config(lowerCAmelCase , lowerCAmelCase )
# load original state_dict
UpperCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation(lowerCAmelCase ).eval()
UpperCAmelCase = False
else:
UpperCAmelCase = MobileViTVaForImageClassification(lowerCAmelCase ).eval()
UpperCAmelCase = False
# remove and rename some keys of load the original model
UpperCAmelCase = checkpoint
remove_unused_keys(lowerCAmelCase )
UpperCAmelCase = create_rename_keys(lowerCAmelCase , base_model=lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# load modified state_dict
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase = model(**lowerCAmelCase )
# verify classification model
if task_name.startswith("""imagenet""" ):
UpperCAmelCase = outputs.logits
UpperCAmelCase = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , lowerCAmelCase , atol=1e-4 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase_ : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
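# Example invocation of the conversion script above (the file name and all
# paths are placeholders; the flags match the argparse definitions):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf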
| 673 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
| 21 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=128112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
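# A minimal usage sketch for the config above (hypothetical, not part of the
# original file): instantiate with defaults and round-trip through a dict.
if __name__ == "__main__":
    config = NllbMoeConfig()
    assert config.model_type == "nllb-moe"
    restored = NllbMoeConfig.from_dict(config.to_dict())
    assert restored.num_experts == config.num_experts == 128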
| 514 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        '''simple docstring'''
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 1_28
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )

    def create_and_check_mask2former_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        '''simple docstring'''
        with torch.no_grad():
            model = Mask2FormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_mask2former_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        '''simple docstring'''
        model = Mask2FormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()

        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": Mask2FormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = Mask2FormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=Mask2FormerConfig , has_text_modality=False )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_mask2former_model( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config , **inputs_dict , output_hidden_states=False )

    def test_mask2former_instance_segmentation_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        '''simple docstring'''
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size) , device=torch_device ),
            """mask_labels""": torch.randn((2, 10, *size) , device=torch_device ),
            """class_labels""": torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config , **inputs_dict , output_hidden_states=True )

    def test_attention_outputs( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        '''simple docstring'''
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
    def model_checkpoints( self ):
        '''simple docstring'''
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
    def test_inference_universal_segmentation_head( self ):
        '''simple docstring'''
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        '''simple docstring'''
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="""pt""" , )
        inputs["""pixel_values"""] = inputs["""pixel_values"""].to(torch_device )
        inputs["""mask_labels"""] = [el.to(torch_device ) for el in inputs["""mask_labels"""]]
        inputs["""class_labels"""] = [el.to(torch_device ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
| 21 | 0 |
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences.

    Raises:
      ValueError: when nums is empty.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # keep the best sum seen so far, extend it with num, or restart at num
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
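# Sanity-check sketch (not part of the original file): because the subsequence
# need not be contiguous, the optimum is just the sum of the positive elements,
# falling back to the single largest element when every element is negative.
def _max_subsequence_sum_closed_form(nums: Sequence[int]) -> int:
    positives = [x for x in nums if x > 0]
    return sum(positives) if positives else max(nums)


assert _max_subsequence_sum_closed_form([1, 2, 3, 4, -2]) == max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert _max_subsequence_sum_closed_form([-2, -3, -1, -4, -6]) == max_subsequence_sum([-2, -3, -1, -4, -6]) == -1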
| 666 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig ):
    model_type = """segformer"""

    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 1_60, 2_56] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=2_56 , semantic_loss_ignore_index=2_55 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4

    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 12
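# Hypothetical usage sketch (not part of the original file): build the config
# and inspect the ONNX export metadata defined above.
if __name__ == "__main__":
    config = SegformerConfig(num_labels=150)
    onnx_config = SegformerOnnxConfig(config)
    print(list(onnx_config.inputs.keys()))  # ['pixel_values']
    print(onnx_config.atol_for_validation)  # 1e-4
    print(onnx_config.default_onnx_opset)  # 12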
| 21 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
Extractor,
GzipExtractor,
    Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 403 |
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APX-Algorithm for MVC = Minimum Vertex Cover

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
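# Verification sketch (not part of the original file): the greedy loop only
# stops once no uncovered edges remain, so every edge must have an endpoint in
# the returned cover. Edges are snapshotted first because the helper mutates
# the adjacency lists in place.
if __name__ == "__main__":
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    edges = [(u, v) for u, neighbors in demo_graph.items() for v in neighbors]
    cover = greedy_min_vertex_cover(demo_graph)
    assert all(u in cover or v in cover for u, v in edges)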
| 21 | 0 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def lowercase ( ):
lowerCamelCase_ = 10
lowerCamelCase_ = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
lowerCamelCase_ = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(lowerCAmelCase__ ) ),
} ,features=lowerCAmelCase__ ,)
return dataset
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=lowerCAmelCase__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt"""
lowerCamelCase_ = FILE_CONTENT
with open(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ )
return filename
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
    import bz2
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.bz2"""
lowerCamelCase_ = bytes(lowerCAmelCase__ ,'''utf-8''' )
    with bz2.open(lowerCAmelCase__ ,'''wb''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
import gzip
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
lowerCamelCase_ = bytes(lowerCAmelCase__ ,'''utf-8''' )
with gzip.open(lowerCAmelCase__ ,'''wb''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.lz4"""
lowerCamelCase_ = bytes(lowerCAmelCase__ ,'''utf-8''' )
        with lz4.frame.open(lowerCAmelCase__ ,'''wb''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowerCAmelCase__ ,'''w''' ) as archive:
archive.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
import tarfile
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.tar"""
with tarfile.TarFile(lowerCAmelCase__ ,'''w''' ) as f:
f.add(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
import lzma
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.xz"""
lowerCamelCase_ = bytes(lowerCAmelCase__ ,'''utf-8''' )
with lzma.open(lowerCAmelCase__ ,'''wb''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
import zipfile
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.txt.zst"""
lowerCamelCase_ = bytes(lowerCAmelCase__ ,'''utf-8''' )
with zstd.open(lowerCAmelCase__ ,'''wb''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """file.xml"""
lowerCamelCase_ = textwrap.dedent(
'''\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='''session''' )
def lowercase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = datasets.Dataset.from_dict(lowerCAmelCase__ )
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(lowerCAmelCase__ ) ) as con:
lowerCamelCase_ = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' ,tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(lowerCAmelCase__ ,'''w''' ,newline='''''' ) as f:
lowerCamelCase_ = csv.DictWriter(lowerCAmelCase__ ,fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(lowerCAmelCase__ ,'''w''' ,newline='''''' ) as f:
lowerCamelCase_ = csv.DictWriter(lowerCAmelCase__ ,fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
    import bz2
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.csv.bz2"""
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCAmelCase__ ,'''wb''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename(csv_path.replace('''.csv''' ,'''.CSV''' ) ) )
f.write(lowerCAmelCase__ ,arcname=os.path.basename(csva_path.replace('''.csv''' ,'''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.join('''main_dir''' ,os.path.basename(lowerCAmelCase__ ) ) )
f.write(lowerCAmelCase__ ,arcname=os.path.join('''main_dir''' ,os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
lowerCamelCase_ = pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(lowerCAmelCase__ ,'''wb''' ) as f:
lowerCamelCase_ = pq.ParquetWriter(lowerCAmelCase__ ,schema=lowerCAmelCase__ )
lowerCamelCase_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase__ ) )] for k in DATA[0]} ,schema=lowerCAmelCase__ )
writer.write_table(lowerCAmelCase__ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowerCamelCase_ = {"""data""": DATA}
with open(lowerCAmelCase__ ,'''w''' ) as f:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowerCamelCase_ = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCAmelCase__ ,'''w''' ) as f:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
import gzip
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(lowerCAmelCase__ ,'''rb''' ) as orig_file:
with gzip.open(lowerCAmelCase__ ,'''wb''' ) as zipped_file:
zipped_file.writelines(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
import gzip
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(lowerCAmelCase__ ,'''rb''' ) as orig_file:
with gzip.open(lowerCAmelCase__ ,'''wb''' ) as zipped_file:
zipped_file.writelines(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.join('''nested''' ,os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.join('''main_dir''' ,os.path.basename(lowerCAmelCase__ ) ) )
f.write(lowerCAmelCase__ ,arcname=os.path.join('''main_dir''' ,os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCAmelCase__ ,'''w''' ) as f:
f.add(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
f.add(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCAmelCase__ ,'''w''' ) as f:
f.add(lowerCAmelCase__ ,arcname=os.path.join('''nested''' ,os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = ["""0""", """1""", """2""", """3"""]
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = ["""0""", """1""", """2""", """3"""]
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = ["""0""", """1""", """2""", """3"""]
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.abc"""
with open(lowerCAmelCase__ ,'''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.join('''main_dir''' ,os.path.basename(lowerCAmelCase__ ) ) )
f.write(lowerCAmelCase__ ,arcname=os.path.join('''main_dir''' ,os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename('''unsupported.ext''' ) )
f.write(lowerCAmelCase__ ,arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = """\n""".join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
lowerCamelCase_ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(lowerCAmelCase__ ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( ):
return os.path.join('''tests''' ,'''features''' ,'''data''' ,'''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def lowercase ( ):
return os.path.join('''tests''' ,'''features''' ,'''data''' ,'''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data''' ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCAmelCase__ ,'''w''' ) as f:
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ ,arcname=os.path.basename(lowerCAmelCase__ ).replace('''.jpg''' ,'''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' ,'''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' ,'''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' ,'''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' ,'''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' ,'''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
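# Standalone demonstration sketch (not part of the original file): what the
# CSV fixture above produces, reproduced outside pytest so it can run directly.
if __name__ == "__main__":
    import tempfile

    demo_path = os.path.join(tempfile.mkdtemp(), "demo.csv")
    with open(demo_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    with open(demo_path, newline="") as f:
        assert len(list(csv.DictReader(f))) == len(DATA) == 4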
| 29 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b - digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 21 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint) | 553 |
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : str =getattr(lowerCamelCase , """handle_key""" , [] )
handle += [key]
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
def lowerCAmelCase_ ( *lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : Dict =getattr(lowerCamelCase , """handle_key""" , [] )
handle += keys
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
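
# Hypothetical usage sketch: methods opt in to keys via the decorators and the
# metaclass collects them into `key_handler` for dispatch. The menu class and
# the chosen KEYMAP entry names are illustrative assumptions.
if __name__ == "__main__":

    class DemoMenu(metaclass=KeyHandler):
        @mark(KEYMAP["newline"])
        def select(cls):
            return "selected"

        @mark_multiple(KEYMAP["up"], KEYMAP["down"])
        def move(cls):
            return "moved"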
| 21 | 0 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
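
# Illustration of the derived sizes (argument values chosen arbitrarily for
# the sketch): a univariate series (input_size=1) with the 7 default lags
# gives feature_size = 1 * 7 + _number_of_features, and _number_of_features
# always includes the 2 extra loc/scale features noted above.
if __name__ == "__main__":
    demo_config = AutoformerConfig(prediction_length=24, context_length=48)
    print(demo_config.feature_size, demo_config._number_of_features)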
| 480 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
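
# Stand-alone toy version of the striding used above: each window re-uses the
# question prefix (q_len tokens) and consecutive windows overlap by
# (doc_stride - q_len) tokens. The numbers are made up for readability.
def chunk_with_stride(input_ids, q_len, max_length=16, doc_stride=8):
    q_indices = input_ids[:q_len]
    chunks = []
    for i in range(q_len, len(input_ids), max_length - doc_stride):
        chunks.append(q_indices + input_ids[i : i + max_length - q_len])
    return chunks

assert all(len(c) <= 16 for c in chunk_with_stride(list(range(40)), q_len=4))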
| 21 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_UpperCAmelCase = state_dict.pop(__snake_case )
if key.startswith("""Qformer.bert""" ):
_UpperCAmelCase = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_UpperCAmelCase = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_UpperCAmelCase = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_UpperCAmelCase = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_UpperCAmelCase = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_UpperCAmelCase = key.replace("""t5""" , """language""" )
_UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(__snake_case , __snake_case )
_UpperCAmelCase = hf_model.load_state_dict(__snake_case , strict=__snake_case )
assert len(__snake_case ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_UpperCAmelCase = load_demo_image()
_UpperCAmelCase = vis_processors["""eval"""](__snake_case ).unsqueeze(0 ).to(__snake_case )
_UpperCAmelCase = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(__snake_case )
# create processor
_UpperCAmelCase = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__snake_case , image_std=__snake_case )
_UpperCAmelCase = BlipaProcessor(image_processor=__snake_case , tokenizer=__snake_case )
_UpperCAmelCase = processor(images=__snake_case , return_tensors="""pt""" ).pixel_values.to(__snake_case )
# make sure processor creates exact same pixel values
assert torch.allclose(__snake_case , __snake_case )
original_model.to(__snake_case )
hf_model.to(__snake_case )
with torch.no_grad():
if "opt" in model_name:
_UpperCAmelCase = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_UpperCAmelCase = hf_model(__snake_case , __snake_case ).logits
else:
_UpperCAmelCase = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_UpperCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
_UpperCAmelCase = hf_model(__snake_case , __snake_case , labels=__snake_case ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_UpperCAmelCase = torch.tensor(
[[-4_1.5_8_5_0, -4.4440, -8.9922], [-4_7.4_3_2_2, -5.9143, -1.7340]] , device=__snake_case )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_UpperCAmelCase = torch.tensor(
[[-5_7.0_1_0_9, -9.8967, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__snake_case )
else:
# cast to same type
_UpperCAmelCase = logits.dtype
assert torch.allclose(original_logits.to(__snake_case ) , __snake_case , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_UpperCAmelCase = """"""
_UpperCAmelCase = tokenizer(__snake_case , return_tensors="""pt""" ).input_ids.to(__snake_case )
_UpperCAmelCase = original_model.generate({"""image""": original_pixel_values} )
_UpperCAmelCase = hf_model.generate(
__snake_case , __snake_case , do_sample=__snake_case , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , __snake_case )
_UpperCAmelCase = input_ids.shape[1]
_UpperCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__snake_case )
_UpperCAmelCase = [text.strip() for text in output_text]
print("""HF generation:""" , __snake_case )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__snake_case )
hf_model.save_pretrained(__snake_case )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
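
# Illustration of the pop-and-reinsert renaming performed by rename_key above,
# on a toy dict standing in for a checkpoint state_dict:
def demo_rename():
    toy_sd = {"visual_encoder.cls_token": 1, "ln_vision.weight": 2}
    rename_key(toy_sd, "ln_vision.weight", "vision_model.post_layernorm.weight")
    return sorted(toy_sd)  # the old key is gone, its value lives under the new key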
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 108 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
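
# Hypothetical smoke test of the export metadata; the base-class constructor
# signature `OnnxConfig(config, task=...)` is assumed from transformers:
if __name__ == "__main__":
    cfg = XLMRobertaXLConfig()
    onnx_cfg = XLMRobertaXLOnnxConfig(cfg, task="multiple-choice")
    print(onnx_cfg.inputs)  # three dynamic axes (batch, choice, sequence) per input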
| 21 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
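
# Hedged usage sketch mirroring the integration test above; it downloads the
# checkpoint, and the class names follow the imports at the top of this file:
def demo_blenderbot_generation():
    tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    model = TFAutoModelForSeqaSeqLM.from_pretrained("facebook/blenderbot-400M-distill")
    inputs = tok(["My friends are cool but they eat too many carbs."], return_tensors="tf")
    ids = model.generate(inputs.input_ids)
    return tok.batch_decode(ids.numpy(), skip_special_tokens=True)[0]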
| 651 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
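
# Usage sketch: fire exposes the function as a CLI, e.g.
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# It can also be called directly, provided the dataset/config pair exists on
# the Hub (the wmt19 de-en pair below is an illustrative choice):
#   download_wmt_dataset("de", "en", dataset="wmt19", save_dir="wmt19-de-en")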
| 21 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 650 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
                den += 1
            num += 1
        den = 10
    return solutions
def solution(max_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
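
# Quick check that the four classic "curious" fractions from the problem
# statement all satisfy the predicate:
for s in ["16/64", "19/95", "26/65", "49/98"]:
    num, den = (int(x) for x in s.split("/"))
    assert is_digit_cancelling(num, den)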
| 21 | 0 |
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
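
# The core neighbourhood move in find_neighborhood is a 2-swap on the tour;
# here it is in isolation (distance bookkeeping omitted):
def two_swap(tour, i, j):
    candidate = tour[:]  # copy so the incumbent solution stays intact
    candidate[i], candidate[j] = candidate[j], candidate[i]
    return candidate

assert two_swap(["a", "b", "c", "d", "a"], 1, 3) == ["a", "d", "c", "b", "a"]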
| 696 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
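
# Example run of the three estimators; results are stochastic, so only the
# order of magnitude of the reported error is meaningful (the iteration count
# below is an arbitrary choice):
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)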
| 21 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
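
# Sketch of how the width coefficient is typically applied to channel counts
# (a round_filters-style helper; this function is illustrative, not part of
# the transformers implementation):
if __name__ == "__main__":

    def scale_channels(channels, width_coefficient, depth_divisor=8):
        c = channels * width_coefficient
        new_c = max(depth_divisor, int(c + depth_divisor / 2) // depth_divisor * depth_divisor)
        if new_c < 0.9 * c:  # never round down by more than 10%
            new_c += depth_divisor
        return int(new_c)

    cfg = EfficientNetConfig()
    print([scale_channels(c, cfg.width_coefficient) for c in cfg.in_channels])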
| 22 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 1 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
"""simple docstring"""
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(lowerCAmelCase_ ) , 10_12 )
    def test_vocab_size(self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
_a = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
| 22 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """simple docstring"""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        """simple docstring"""
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 22 | 1 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
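# Illustrative usage sketch (not part of the original module; the dataset
# contents below are made up for the example):
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#   joined = concatenate_datasets([d1, d2])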
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
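# Usage sketch (illustrative, not part of the original file): loading the
# pretrained files requires access to the Hugging Face Hub.
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("Hello world")
#   # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
#   # mirroring build_inputs_with_special_tokens above.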
| 22 | 1 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """simple docstring"""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
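# Minimal usage sketch (illustrative): for a symmetric matrix the value returned
# by power_iteration should match the largest eigenvalue from np.linalg.eigh.
#
#   m = np.array([[2.0, 1.0], [1.0, 2.0]])
#   value, vec = power_iteration(m, np.array([1.0, 0.0]))
#   # value is approximately 3.0, the dominant eigenvalue of m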
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
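# Example invocation (illustrative; the script name and both paths are
# placeholders, not real checkpoints):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt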
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """simple docstring"""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
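# Example invocation (illustrative; the script name is a placeholder and the
# directory must contain small_ft.pkl, medium_ft.pkl and large_ft.pkl):
#
#   python convert_dialogpt.py --dialogpt_path ./checkpoints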
| 22 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
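# Context sketch (illustrative, outside the test class): the scheduler under
# test is driven like any other diffusers scheduler; the shapes are made up.
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   model_output = torch.randn(1, 3, 32, 32)  # stand-in for a UNet prediction
#   prev = scheduler.step(model_output, scheduler.timesteps[0], sample).prev_sample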
| 22 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
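# Usage sketch (illustrative, not part of the original file): the checkpoint id,
# audio array and sampling rate below are placeholders.
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="a transcription", return_tensors="pt")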
| 22 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """simple docstring"""
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 1 |
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """simple docstring"""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
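# Worked example (illustrative): on an open 2x2 grid there are exactly two
# simple paths from the top-left to the bottom-right corner.
#
#   grid = [[0, 0], [0, 0]]
#   print(depth_first_search(grid, 0, 0, set()))  # expected output: 2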
| 22 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    sim = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, sim, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
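# Truth-table sketch (illustrative; running it needs qiskit-aer installed).
# Measured bitstrings read as (AND, XOR), so input (1, 1) peaks at '10'.
#
#   for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
#       print(a, b, half_adder(a, b))  # e.g. (1, 1) -> {'10': 1000}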
| 22 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
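# Usage sketch (illustrative): the config instantiates like any transformers
# configuration; the keyword values below are simply the defaults from above.
#
#   config = MegatronBertConfig(hidden_size=1024, num_attention_heads=16)
#   assert config.model_type == "megatron-bert"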
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """simple docstring"""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the working registers: (a, b, c, d) <- (d, b + rot(f), b, c)
            a, b, c, d = d, sum_32(b, left_rotate_32(f, shift_amounts[i])), b, c

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
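# Verification sketch (illustrative): the digest should agree with hashlib's MD5.
#
#   import hashlib
#   assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")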
| 22 | 1 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
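# Behavior sketch (illustrative): with the default IN_MEMORY_MAX_SIZE of 0,
# is_small_dataset never reports a dataset as small.
#
#   assert is_small_dataset(100 * 2**20) is False
#   assert is_small_dataset(None) is False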
| 22 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        """simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        """simple docstring"""
        pass
def snake_case_ ():
'''simple docstring'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    imagea = Image.open(dataset[4]['''file'''] )
    imageb = Image.open(dataset[5]['''file'''] )
    images = [imagea, imageb]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
    def test_image( self ):
"""simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
        expected_ids = [3_06, 1_91, 1_91]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
        expected_ids = [3_03, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
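# Added sketch (not part of the original tests): the nearest-cluster
# quantization that yields the input_ids checked above. The helper name and
# the rounded cluster values are hypothetical.
if __name__ == "__main__":
    import numpy as np

    def _quantize_to_clusters(pixels , clusters ):
        # pixels: (num_pixels, 3) scaled to [-1, 1]; clusters: (n_clusters, 3)
        distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1 )
        return distances.argmin(axis=-1 )  # one token id per pixel

    _clusters = np.asarray([[0.8866, 0.6619, 0.3892], [-0.6043, -0.0230, 0.5424]] )
    _pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]] )
    print(_quantize_to_clusters(_pixels , _clusters ) )  # [0 1]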
| 22 | 1 |
'''simple docstring'''
import qiskit
def half_adder (bita : int , bitb : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F'''Half Adder Output Qubit Counts: {counts}''')
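# Added classical reference for the circuit above: for inputs (a, b) the
# dominant measured bitstring is "<carry><sum>" = "<a AND b><a XOR b>",
# so half_adder(1, 1) should be dominated by the counts key "10":
#   sum   = a ^ b  (written on qubit 2 via the two CNOTs)
#   carry = a & b  (written on qubit 3 via the Toffoli gate)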
| 22 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
"""simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
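# Added numpy sketch (hypothetical helper) of the per-position cross entropy
# that optax computes in the test above: -sum(onehot * log_softmax(logits)).
if __name__ == "__main__":
    import numpy as np

    def _softmax_cross_entropy(logits , onehot_labels ):
        log_probs = logits - np.log(np.exp(logits ).sum(axis=-1 , keepdims=True ) )
        return -(onehot_labels * log_probs).sum(axis=-1 )

    print(_softmax_cross_entropy(np.array([[2.0, 0.5, -1.0]] ) , np.array([[1.0, 0.0, 0.0]] ) ) )  # ~[0.2414]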
| 22 | 1 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap :
    def __init__( self ) -> None:
        """simple docstring"""
        self.node_position = []
    def get_position( self , vertex ):
        """simple docstring"""
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        """simple docstring"""
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        """simple docstring"""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        """simple docstring"""
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm (adjacency_list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
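# Added self-contained check (hypothetical triangle graph 0-1:1, 1-2:2,
# 0-2:4) that avoids the interactive prompt above; the MST keeps the two
# cheapest edges:
#   adjacency_list = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
#       adjacency_list[u].append([v, w])
#       adjacency_list[v].append([u, w])
#   prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)]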
| 22 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def decimal_to_bits (x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b c h w -> b c 1 h w''' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , '''b c d h w -> b (c d) h w''' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal (x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
    dec = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step (self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else '''cpu'''
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step (self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str = "epsilon" , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion ( DiffusionPipeline ):
    def __init__( self , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ):
        """simple docstring"""
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height : Optional[int] = 2_56 , width : Optional[int] = 2_56 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
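# Added sanity sketch for the bit codecs above: the 8-bit roundtrip is
# lossless on the 1/255 grid, since inputs are truncated to ints before
# being decomposed into sign bits:
#   x = torch.rand(1, 3, 4, 4)
#   assert torch.allclose((x * 255).int().float() / 255, bits_to_decimal(decimal_to_bits(x)))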
| 22 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : str = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig ( PretrainedConfig ):
    model_type = 'altclip_text_model'
    def __init__( self , vocab_size=25_00_02 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , initializer_range=0.0_2 , initializer_factor=0.0_2 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=7_68 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig ( PretrainedConfig ):
    model_type = 'altclip_vision_model'
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , projection_dim=5_12 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , initializer_factor=1.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig ( PretrainedConfig ):
    model_type = 'altclip'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=7_68 , logit_scale_init_value=2.6_5_9_2 , **kwargs ):
        """simple docstring"""
        text_config_dict = kwargs.pop('''text_config_dict''' , None )
        vision_config_dict = kwargs.pop('''vision_config_dict''' , None )
        super().__init__(**kwargs )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
                            F'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
                            F'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict['''id2label'''] = {
                    str(key ): value for key, value in _vision_config_dict['''id2label'''].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
                            F'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
                            F'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs( cls , text_config : AltCLIPTextConfig , vision_config : AltCLIPVisionConfig , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
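# Added usage sketch (uses the class names defined in this file): composing
# the full config from the two towers via the classmethod above.
if __name__ == "__main__":
    _text = AltCLIPTextConfig(hidden_size=10_24 )
    _vision = AltCLIPVisionConfig(hidden_size=7_68 )
    _combined = AltCLIPConfig.from_text_vision_configs(_text , _vision )
    print(_combined.to_dict()['''text_config''']['''hidden_size'''] )  # 1024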
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig ( PretrainedConfig ):
    model_type = 'roformer'
    def __init__( self , vocab_size=5_00_00 , embedding_size=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=15_36 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
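# Added usage sketch (class names as defined in this file): embedding_size
# falls back to hidden_size when left as None, per the __init__ above.
if __name__ == "__main__":
    _cfg = RoFormerConfig(embedding_size=None , hidden_size=7_68 )
    print(_cfg.embedding_size )  # 768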
| 22 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config (model_name ):
    '''simple docstring'''
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=idalabel , label2id=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key (name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def convert_focalnet_checkpoint (model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    # fmt: off
    model_name_to_url = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'Pushing model and processor of {model_name} to the hub...' )
        model.push_to_hub(f'{model_name}' )
        processor.push_to_hub(f'{model_name}' )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
_snake_case : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
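# Added usage note (the script filename below is hypothetical): the converter
# above is driven from the command line, e.g.
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub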
| 22 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    destination_vertex: int
    weight: int
class AdjacencyList :
    def __init__( self , size : int ):
        """simple docstring"""
        self._graph = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        """simple docstring"""
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ):
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int | None:
        """simple docstring"""
        queue = deque([start_vertex] )
        distances = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
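    # Added quick check of the 0-1 BFS above: 0->1 costs 0, 1->2 and 0->2
    # cost 1, so the shortest path 0->2 has total weight 1.
    _g = AdjacencyList(3 )
    _g.add_edge(0 , 1 , 0 )
    _g.add_edge(1 , 2 , 1 )
    _g.add_edge(0 , 2 , 1 )
    print(_g.get_shortest_path(0 , 2 ) )  # 1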
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester :
    def __init__( self , parent ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFEsmModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_resize_token_embeddings( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_save_load_after_resize_token_embeddings( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class A ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
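# Added usage sketch (downloads the facebook/esm2_t6_8M_UR50D checkpoint, so
# it needs network access):
#   model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   logits.shape  # (1, 6, 33); 33 is the ESM-2 vocabulary size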
| 22 |
'''simple docstring'''
from math import pi, sqrt
def gamma (num : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ():
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(F'''gamma({num}) = {gamma(num)}''')
        print('\nEnter 0 to exit...')
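# Added note: the recursion above implements Γ(n) = (n - 1)·Γ(n - 1), so
# integer inputs give factorials and half-integers bottom out at Γ(0.5) = √π:
#   gamma(5)    # 4! = 24.0
#   gamma(3.5)  # 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234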
| 22 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
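# Added usage note: with the lazy module in place, consumers import names
# normally and the heavy torch-backed module is only loaded on first
# attribute access, e.g.
#   from transformers.models.mega import MegaConfig  # resolved lazily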
| 22 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_2( self ):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def test_stable_diffusion_karras_sigmas( self ):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 22 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A ( _a ):
@staticmethod
@abstractmethod
def __lowerCAmelCase ( lowerCAmelCase_ : ArgumentParser ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer (s ):
    '''simple docstring'''
    def remove_articles(text ):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact (a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em (predictions , references ):
    '''simple docstring'''
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram (sgrams , cgrams , rgramslist , numref ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
_a = deltmpscorea / len(UpperCamelCase )
# ADDITION
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = set(UpperCamelCase ) & set(UpperCamelCase )
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent (ssent , csent , rsents ):
    '''simple docstring'''
    numref = len(rsents )
    sagrams = ssent.split(''' ''' )
    cagrams = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
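# Normalization sketch (hypothetical input): with lowercase=True and the default "13a"
# tokenizer, normalize("About 95 species.") should yield roughly "about 95 species ." --
# lowercasing happens first, then sacrebleu's 13a rule splits off punctuation, and
# return_str=False would return the whitespace-split token list instead.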
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
_a = sari_score / len(UpperCamelCase )
return 100 * sari_score
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
'''simple docstring'''
_a = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
_a = sacrebleu.corpus_bleu(
UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
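# Usage sketch (the local path is illustrative): this metric combines SARI, SacreBLEU
# and exact match, as used for WikiSplit-style evaluation.
# import datasets
# metric = datasets.load_metric("./wiki_split.py")  # hypothetical local script path
# scores = metric.compute(
#     sources=["About 95 species are currently accepted."],
#     predictions=["About 95 you now get in."],
#     references=[["About 95 species are currently known."]],
# )
# scores -> {"sari": ..., "sacrebleu": ..., "exact": ...}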
| 22 | 1 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = numpy_to_pil(UpperCamelCase )
return images
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
if images.ndim == 3:
_a = images[None, ...]
_a = (images * 255).round().astype('''uint8''' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_a = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
else:
_a = [Image.fromarray(UpperCamelCase ) for image in images]
return pil_images
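# Usage sketch: the first helper maps a torch tensor in [-1, 1] of shape (batch,
# channels, height, width) to PIL images via the second helper (called as
# numpy_to_pil above), which expects numpy arrays in [0, 1]. E.g. (hypothetical):
# import numpy as np
# pil_images = numpy_to_pil(np.random.rand(2, 64, 64, 3))  # two 64x64 RGB images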
| 22 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = numpy_to_pil(UpperCamelCase )
return images
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
if images.ndim == 3:
_a = images[None, ...]
_a = (images * 255).round().astype('''uint8''' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_a = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
else:
_a = [Image.fromarray(UpperCamelCase ) for image in images]
return pil_images
| 22 | 1 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def snake_case_ (UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = model.config
_a = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_a = MBartConfig(
is_decoder=UpperCamelCase , is_encoder_decoder=UpperCamelCase , add_cross_attention=UpperCamelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase , add_final_layer_norm=UpperCamelCase , )
return encoder_config, decoder_config
def snake_case_ (UpperCamelCase : Tuple ):
'''simple docstring'''
if "encoder.model" in name:
_a = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_a = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_a = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_a = '''encoder.''' + name
if "attn.proj" in name:
_a = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_a = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_a = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_a = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_a = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_a = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_a = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_a = '''encoder.layernorm.bias'''
return name
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[int] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(UpperCamelCase )
if "qkv" in key:
_a = key.split('''.''' )
_a = int(key_split[3] )
_a = int(key_split[5] )
_a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_a = val
return orig_state_dict
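# Shape sketch for the qkv split above (dims are illustrative): a fused "qkv" weight
# of shape (3 * dim, dim) is sliced row-wise into query val[:dim, :],
# key val[dim : dim * 2, :] and value val[-dim:, :], each (dim, dim); the matching
# (3 * dim,) bias splits the same way.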
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Tuple=None , UpperCamelCase : List[str]=False ):
'''simple docstring'''
_a = DonutModel.from_pretrained(UpperCamelCase ).eval()
# load HuggingFace model
_a , _a = get_configs(UpperCamelCase )
_a = DonutSwinModel(UpperCamelCase )
_a = MBartForCausalLM(UpperCamelCase )
_a = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
model.eval()
_a = original_model.state_dict()
_a = convert_state_dict(UpperCamelCase , UpperCamelCase )
model.load_state_dict(UpperCamelCase )
# verify results on scanned document
_a = load_dataset('''hf-internal-testing/example-documents''' )
_a = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_a = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase , from_slow=UpperCamelCase )
_a = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_a = DonutProcessor(UpperCamelCase , UpperCamelCase )
_a = processor(UpperCamelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_a = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_a = '''When is the coffee break?'''
_a = task_prompt.replace('''{user_input}''' , UpperCamelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_a = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_a = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_a = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_a = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_a = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_a = original_model.decoder.tokenizer(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_a = original_model.encoder.model.patch_embed(UpperCamelCase )
_a , _a = model.encoder.embeddings(UpperCamelCase )
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 )
# verify encoder hidden states
_a = original_model.encoder(UpperCamelCase )
_a = model.encoder(UpperCamelCase ).last_hidden_state
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-2 )
# verify decoder hidden states
_a = original_model(UpperCamelCase , UpperCamelCase , UpperCamelCase ).logits
_a = model(UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
_snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
_snake_case : Union[str, Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
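# Example invocation (a sketch; the script filename is assumed):
# python convert_donut_to_pytorch.py \
#     --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#     --pytorch_dump_folder_path ./donut-base-finetuned-docvqa \
#     --push_to_hub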
| 22 |
'''simple docstring'''
import requests
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_a = {'''Content-Type''': '''application/json'''}
_a = requests.post(UpperCamelCase , json={'''text''': message_body} , headers=UpperCamelCase )
if response.status_code != 200:
_a = (
'''Request to slack returned an error '''
f'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
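# The webhook expects a JSON body of the form {"text": "..."}; a Slack-issued URL
# typically looks like https://hooks.slack.com/services/T000/B000/XXXXXXXX (sketch).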
| 22 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
_a = []
_a = []
_a = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
_a = len(UpperCamelCase ) if (len(UpperCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(UpperCamelCase ) , '''Postfix'''.center(UpperCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCamelCase ) == 0:
stack.append(UpperCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(UpperCamelCase ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:  # never pop past "(", which has no priority entry (avoids a KeyError)
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(UpperCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(UpperCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(UpperCamelCase ) # return Postfix as str
def snake_case_ (UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = list(infix[::-1] ) # reverse the infix equation
for i in range(len(UpperCamelCase ) ):
if infix[i] == "(":
_a = ''')''' # change "(" to ")"
elif infix[i] == ")":
_a = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(UpperCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
_snake_case : int = input('\nEnter an Infix Equation = ') # Input an Infix equation
_snake_case : List[str] = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
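# Worked example (a sketch; relies on the "(" guard in the shunting loop above):
# infix_2_postfix("a+b*(c^d-e)") returns "abcd^e-*+" (plus the printed trace), and
# infix_2_prefix("a+b*(c^d-e)") returns "+a*b-^cde".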
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
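# Usage sketch (values illustrative; assumes the processor's methods keep their
# upstream names such as preprocess): with the defaults above, an input image is
# resized so its short edge is 256, center-cropped to 224x224, rescaled by 1/255 and
# normalized with the ImageNet mean/std, yielding pixel_values of shape (1, 3, 224, 224)
# for a single image.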
| 22 | 1 |