| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
"""Tests for the simple Python interpreter used by Transformers tools."""
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    """Toy tool passed to the interpreter in the tests below."""
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
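
# Illustrative usage sketch, not part of the original test file. As the tests
# above show, the tools dict must contain every callable a snippet uses, and
# `evaluate` returns the value of the last statement while recording
# assignments in `state`.
if __name__ == "__main__":
    demo_state = {"x": 10}
    demo_result = evaluate("y = add_two(x)\ny", {"add_two": add_two}, state=demo_state)
    print(demo_result)  # 12 -- value of the last statement
    print(demo_state)   # {'x': 10, 'y': 12}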
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
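
# Because every scheduler above shares the SchedulerMixin config format, one
# can be swapped for another on a loaded pipeline. A common usage pattern
# (kept in comments since this file is a package __init__; the checkpoint id
# is illustrative):
#
#     from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)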
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by sampling random points in the square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
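
# Quick demo of the estimators above; not part of the original module. The
# iteration counts are arbitrary, and the error of a Monte Carlo mean shrinks
# roughly as 1/sqrt(n).
if __name__ == "__main__":
    pi_estimator(10_000)                        # error typically around 1e-2
    area_under_line_estimator_check(10_000)     # integral of y=x over [0, 1] is 0.5
    pi_estimator_using_area_under_curve(10_000)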
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler problem 71: return the numerator of the fraction
    immediately to the left of numerator/denominator in the ordered set of
    reduced proper fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
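
# Cross-checks derived from the problem statement (added for illustration):
# for denominators up to 8 the fraction immediately left of 3/7 is 2/5, and
# for the full limit the known answer is 428570/999997.
if __name__ == "__main__":
    from fractions import Fraction

    assert solution(limit=8) == 2
    assert Fraction(428_570, 999_997) < Fraction(3, 7)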
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
def a ( __UpperCAmelCase : Any ) -> Optional[Any]:
print("""Loading config file...""" )
def flatten_yaml_as_dict(__UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict="" , __UpperCAmelCase : Dict="." ):
__magic_name__: List[str] = []
for k, v in d.items():
__magic_name__: str = parent_key + sep + k if parent_key else k
if isinstance(lowerCamelCase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowerCamelCase_ , lowerCamelCase_ , sep=lowerCamelCase_ ).items() )
else:
items.append((new_key, v) )
return dict(lowerCamelCase_ )
__magic_name__: Optional[int] = argparse.Namespace()
with open(lowerCamelCase_ , """r""" ) as yaml_file:
try:
__magic_name__: Union[str, Any] = yaml.load(lowerCamelCase_ , Loader=yaml.FullLoader )
__magic_name__: Optional[Any] = flatten_yaml_as_dict(lowerCamelCase_ )
for k, v in flat_cfg.items():
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(lowerCamelCase_ , str(lowerCamelCase_ ) ) )
return config
def a ( __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Dict:
__magic_name__: Optional[int] = MobileViTVaConfig()
__magic_name__: int = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
__magic_name__: Any = 1_0_0_0
if int(task_name.strip().split("""_""" )[-1] ) == 3_8_4:
__magic_name__: Optional[int] = 3_8_4
else:
__magic_name__: Optional[Any] = 2_5_6
__magic_name__: int = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
__magic_name__: Optional[int] = 2_1_0_0_0
if int(task_name.strip().split("""_""" )[-1] ) == 3_8_4:
__magic_name__: Optional[Any] = 3_8_4
else:
__magic_name__: Optional[int] = 2_5_6
__magic_name__: str = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
__magic_name__: Union[str, Any] = 1_5_1
__magic_name__: Optional[Any] = 5_1_2
__magic_name__: str = """ade20k-id2label.json"""
__magic_name__: Optional[Any] = True
elif task_name.startswith("""voc_""" ):
__magic_name__: Optional[Any] = 2_1
__magic_name__: int = 5_1_2
__magic_name__: Tuple = """pascal-voc-id2label.json"""
__magic_name__: Dict = True
# orig_config
__magic_name__: Tuple = load_orig_config_file(lowerCamelCase_ )
assert getattr(lowerCamelCase_ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
__magic_name__: Optional[int] = getattr(lowerCamelCase_ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(lowerCamelCase_ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__magic_name__: Union[str, Any] = getattr(lowerCamelCase_ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__magic_name__: int = getattr(lowerCamelCase_ , """model.segmentation.output_stride""" , 1_6 )
if "_deeplabv3" in task_name:
__magic_name__: int = getattr(lowerCamelCase_ , """model.segmentation.deeplabv3.aspp_rates""" , [1_2, 2_4, 3_6] )
__magic_name__: List[Any] = getattr(lowerCamelCase_ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_1_2 )
__magic_name__: Union[str, Any] = getattr(lowerCamelCase_ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
__magic_name__: str = """huggingface/label-files"""
__magic_name__: Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
__magic_name__: List[str] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__magic_name__: Any = idalabel
__magic_name__: Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def a ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Any:
__magic_name__: Optional[int] = dct.pop(lowerCamelCase_ )
__magic_name__: Optional[int] = val
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any]=False ) -> int:
if base_model:
__magic_name__: int = """"""
else:
__magic_name__: str = """mobilevitv2."""
__magic_name__: Optional[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__magic_name__: Tuple = k[8:]
else:
__magic_name__: List[str] = k
if ".block." in k:
__magic_name__: List[Any] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
__magic_name__: Any = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
__magic_name__: Tuple = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
__magic_name__: str = k_new.replace("""conv_1.""" , f'{model_prefix}conv_stem.' )
for i in [1, 2]:
if f'layer_{i}.' in k:
__magic_name__: List[Any] = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
__magic_name__: Any = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
__magic_name__: Optional[int] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if f'layer_{i}.0.' in k:
__magic_name__: Tuple = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if f'layer_{i}.1.local_rep.0.' in k:
__magic_name__: Union[str, Any] = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if f'layer_{i}.1.local_rep.1.' in k:
__magic_name__: str = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
__magic_name__: Any = [0, 1]
elif i == 4:
__magic_name__: Optional[int] = [0, 1, 2, 3]
elif i == 5:
__magic_name__: Tuple = [0, 1, 2]
for j in j_in:
if f'layer_{i}.1.global_rep.{j}.' in k:
__magic_name__: Any = k_new.replace(
f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if f'layer_{i}.1.global_rep.{j+1}.' in k:
__magic_name__: Optional[Any] = k_new.replace(
f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if f'layer_{i}.1.conv_proj.' in k:
__magic_name__: List[Any] = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
__magic_name__: Tuple = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
__magic_name__: Optional[Any] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
__magic_name__: int = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
__magic_name__: Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
__magic_name__: Tuple = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
__magic_name__: Optional[Any] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
__magic_name__: Tuple = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
__magic_name__: Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
__magic_name__: Union[str, Any] = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def a ( __UpperCAmelCase : List[Any] ) -> int:
__magic_name__: int = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(lowerCamelCase_ )
for k in keys_to_ignore:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def a ( ) -> str:
__magic_name__: Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
__magic_name__: Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def a ( __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> Optional[int]:
__magic_name__: Optional[Any] = get_mobilevitva_config(lowerCamelCase_ , lowerCamelCase_ )
# load original state_dict
__magic_name__: str = torch.load(lowerCamelCase_ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
__magic_name__: str = MobileViTVaForSemanticSegmentation(lowerCamelCase_ ).eval()
__magic_name__: Any = False
else:
__magic_name__: List[str] = MobileViTVaForImageClassification(lowerCamelCase_ ).eval()
__magic_name__: Dict = False
# remove and rename some keys of load the original model
__magic_name__: List[str] = checkpoint
remove_unused_keys(lowerCamelCase_ )
__magic_name__: Any = create_rename_keys(lowerCamelCase_ , base_model=lowerCamelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# load modified state_dict
model.load_state_dict(lowerCamelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__magic_name__: int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
__magic_name__: Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
__magic_name__: Dict = model(**lowerCamelCase_ )
# verify classification model
if task_name.startswith("""imagenet""" ):
__magic_name__: Union[str, Any] = outputs.logits
__magic_name__: Optional[int] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__magic_name__: int = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] )
assert torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1E-4 )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
print(f'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCamelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
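
# The converter can also be driven directly from Python; this sketch is not
# part of the original script, and the checkpoint/config paths below are
# placeholders for files from the original ml-cvnets release, not real paths.
#
#     convert_mobilevitv2_checkpoint(
#         task_name="imagenet1k_256",
#         checkpoint_path="./mobilevitv2-1.0.pt",
#         orig_config_path="./mobilevitv2.yaml",
#         pytorch_dump_folder_path="./mobilevitv2-1.0-imagenet1k-256",
#     )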
"""Convert a base-10 integer to its base-16 (hexadecimal) representation."""

values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Take a whole-valued int or float and return its hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
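
# A few spot checks for the converter above, worked out by hand (not part of
# the original module). Note the 0 edge case: the while-loop never runs, so
# the function returns the bare prefix "0x" rather than "0x0".
if __name__ == "__main__":
    assert decimal_to_hexadecimal(5) == "0x5"
    assert decimal_to_hexadecimal(540) == "0x21c"
    assert decimal_to_hexadecimal(-256) == "-0x100"
    assert decimal_to_hexadecimal(0) == "0x"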
"""Lazy import structure for the NLLB tokenizers."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
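
# The effect of `_LazyModule` is that the tokenizer submodules are only
# imported on first attribute access. A simplified sketch of the idea, NOT
# the real implementation (which lives in `transformers.utils`):
#
#     import importlib
#     import types
#
#     class LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             # Resolve which submodule defines `attr`, importing it lazily.
#             for submodule, exported in self._import_structure.items():
#                 if attr in exported:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")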
"""Base scheduler utilities shared by the diffusers schedulers."""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs: bool = False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
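
# `_get_compatibles` is what powers scheduler swapping in practice: any class
# named in `_compatibles` can be rebuilt from another scheduler's config.
# Illustrative usage, kept in comments since this is a library module (the
# checkpoint id is a public one, but any scheduler checkpoint works the same):
#
#     from diffusers import DDPMScheduler
#
#     scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
#     print(scheduler.compatibles)  # classes that share this config format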
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( a_, a_, unittest.TestCase ):
__lowerCAmelCase = IFPipeline
__lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
__lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __magic_name__ ( self ):
return self._get_dummy_components()
def __magic_name__ ( self , _a , _a=0 ):
if str(_a ).startswith("mps" ):
lowercase : List[str] = torch.manual_seed(_a )
else:
lowercase : Dict = torch.Generator(device=_a ).manual_seed(_a )
lowercase : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __magic_name__ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ ( self ):
self._test_save_load_local()
def __magic_name__ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __magic_name__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ):
# if
lowercase : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowercase : List[str] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=_a , tokenizer=_a )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowercase , lowercase : int = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase : List[str] = None
lowercase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
lowercase : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase : List[str] = IFInpaintingPipeline(**pipe_a.components )
lowercase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_a , _a , _a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Optional[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : int = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_A : List[str] = get_logger(__name__)
class a__ :
__lowerCAmelCase = """dummy_data"""
__lowerCAmelCase = """datasets"""
__lowerCAmelCase = False
def __init__( self , _a , _a , _a , _a = None , _a = False , _a = True , _a = None , ):
lowercase : int = 0
lowercase : Optional[Any] = dataset_name
lowercase : List[str] = cache_dir
lowercase : Union[str, Any] = use_local_dummy_data
lowercase : str = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : List[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Tuple = str(_a )
# to be downloaded
lowercase : Tuple = None
lowercase : List[Any] = None
@property
def __magic_name__ ( self ):
if self._dummy_file is None:
lowercase : Optional[int] = self.download_dummy_data()
return self._dummy_file
@property
def __magic_name__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def __magic_name__ ( self ):
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def __magic_name__ ( self ):
lowercase : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : str = cached_path(
_a , cache_dir=self.cache_dir , extract_compressed_file=_a , force_extract=_a )
return os.path.join(_a , self.dummy_file_name )
@property
def __magic_name__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __magic_name__ ( self ):
if self._bucket_url is None:
lowercase : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def __magic_name__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def __magic_name__ ( self , _a , *_a ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Optional[int] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_a , _a ):
return self.create_dummy_data_dict(_a , _a )
elif isinstance(_a , (list, tuple) ):
return self.create_dummy_data_list(_a , _a )
else:
return self.create_dummy_data_single(_a , _a )
def __magic_name__ ( self , _a , *_a ):
return self.download_and_extract(_a )
def __magic_name__ ( self , _a , _a ):
return self.download_and_extract(_a )
def __magic_name__ ( self , _a , *_a , **_a ):
return path
def __magic_name__ ( self ):
return {}
def __magic_name__ ( self , _a , _a ):
lowercase : List[str] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_a , _a ):
for single_url in single_urls:
download_callback(_a )
else:
lowercase : Union[str, Any] = single_urls
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_a , _a ):
lowercase : Any = [os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Tuple = os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) )
lowercase : List[str] = value
# make sure that values are unique
if all(isinstance(_a , _a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __magic_name__ ( self , _a , _a ):
lowercase : Union[str, Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Any = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , _a ) ) for url in data_url )
lowercase : List[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : Tuple = [data_url[0]] * len(_a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Union[str, Any] = os.path.join(_a , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(_a )
return dummy_data_list
def __magic_name__ ( self , _a , _a ):
for download_callback in self.download_callbacks:
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Tuple = os.path.join(_a , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(_a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
pass
def __magic_name__ ( self , _a ):
def _iter_archive_members(_a ):
# this preserves the order of the members inside the ZIP archive
lowercase : Optional[int] = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_a )
lowercase : Union[str, Any] = Path(_a )
lowercase : List[Any] = _iter_archive_members(_a ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(_a ).as_posix(), file_path.open("rb" )
def __magic_name__ ( self , _a ):
if not isinstance(_a , _a ):
lowercase : Any = [paths]
for path in paths:
if os.path.isfile(_a ):
if os.path.basename(_a ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_a ):
if os.path.basename(_a ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(_a ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(_a , _a )
"""Check whether three 3D points are collinear."""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create the vector from end_point1 to end_point2 in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Compute the cross product of vectors AB and AC."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if a vector is the zero vector, rounding to `accuracy` decimals."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
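
# Quick checks for the helpers above (added for illustration): points on the
# line x = y = z are collinear, while the standard basis points are not.
if __name__ == "__main__":
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
    assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))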
def solution(pence: int = 200) -> int:
    """Project Euler problem 31: count the ways to make `pence` pence from
    standard British coins (1p, 2p, 5p, 10p, 20p, 50p, 100p, 200p)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
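
# Worked mini-example of the DP (added for illustration): there are exactly
# 4 ways to make 5 pence with {1, 2, 5}: 1+1+1+1+1, 1+1+1+2, 1+2+2, and 5.
if __name__ == "__main__":
    assert solution(5) == 4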
"""Convert ProphetNet checkpoints."""
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the old checkpoint's weights into the new ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old checkpoints store q/k/v as one fused in_proj matrix
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
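
# The converter can also be called directly from Python; this sketch is not
# part of the original script, and the checkpoint directory below is a
# placeholder for one of the `patrickvonplaten/..._old` exports mentioned
# above, not a real path.
#
#     convert_prophetnet_checkpoint_to_pytorch(
#         prophetnet_checkpoint_path="./prophetnet-large-uncased_old",
#         pytorch_dump_folder_path="./prophetnet-large-uncased",
#     )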
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 48
lowerCAmelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 60
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = 126
lowerCAmelCase = 7
lowerCAmelCase = 255.0
lowerCAmelCase = ''
return config
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
lowerCAmelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
lowerCAmelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
lowerCAmelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
lowerCAmelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
lowerCAmelCase = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
lowerCAmelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
lowerCAmelCase = 'swin2sr.' + name
return name
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase = key.split('.' )
lowerCAmelCase = int(key_split[1] )
lowerCAmelCase = int(key_split[4] )
lowerCAmelCase = config.embed_dim
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
pass
else:
lowerCAmelCase = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
lowerCAmelCase = get_config(_UpperCAmelCase )
lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase )
model.eval()
lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' )
lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
lowerCAmelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256
lowerCAmelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase = model(_UpperCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 )
print('Looks ok!' )
lowerCAmelCase = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
lowerCAmelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = XCLIPTextConfig()
# derive patch size from model name
snake_case_ : List[str] = model_name.find("""patch""" )
snake_case_ : Optional[Any] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
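    # the two characters after "patch" encode the patch size, e.g. "patch32" -> 32, "patch14" -> 14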
snake_case_ : Tuple = XCLIPVisionConfig(patch_size=__UpperCamelCase , num_frames=__UpperCamelCase )
if "large" in model_name:
snake_case_ : int = 7_6_8
snake_case_ : Optional[int] = 3_0_7_2
snake_case_ : List[Any] = 1_2
snake_case_ : List[Any] = 1_0_2_4
snake_case_ : List[str] = 4_0_9_6
snake_case_ : Tuple = 1_6
snake_case_ : Optional[Any] = 2_4
snake_case_ : Any = 7_6_8
snake_case_ : Optional[Any] = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
snake_case_ : int = 3_3_6
snake_case_ : str = XCLIPConfig.from_text_vision_configs(__UpperCamelCase , __UpperCamelCase )
if "large" in model_name:
snake_case_ : Any = 7_6_8
return config
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
if name == "token_embedding.weight":
snake_case_ : List[Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
snake_case_ : Tuple = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
snake_case_ : str = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
snake_case_ : Any = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
snake_case_ : Dict = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
snake_case_ : Optional[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
snake_case_ : Tuple = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
snake_case_ : List[Any] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
snake_case_ : List[str] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
snake_case_ : Optional[Any] = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
snake_case_ : Optional[Any] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
snake_case_ : List[Any] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
snake_case_ : List[Any] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
snake_case_ : str = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
snake_case_ : Optional[Any] = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
snake_case_ : Dict = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
snake_case_ : Optional[Any] = name.replace("""text_projection""" , """text_projection.weight""" )
    # projection heads on top of the two towers
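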
if "prompts_visual_proj" in name:
snake_case_ : Dict = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
snake_case_ : Optional[Any] = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
    # mit (multi-frame integration transformer)
if name == "mit.positional_embedding":
snake_case_ : Dict = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
snake_case_ : Tuple = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
snake_case_ : int = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ : str = orig_state_dict.pop(__UpperCamelCase )
if "attn.in_proj" in key:
snake_case_ : List[Any] = key.split(""".""" )
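            # the fused in_proj stacks q, k and v along dim 0; split it using the hidden size of the matching tower (visual, mit or text)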
if key.startswith("""visual""" ):
snake_case_ : Union[str, Any] = key_split[3]
snake_case_ : Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ : str = val[
:dim, :
]
snake_case_ : Dict = val[
dim : dim * 2, :
]
snake_case_ : Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ : Tuple = val[
:dim
]
snake_case_ : int = val[
dim : dim * 2
]
snake_case_ : Any = val[
-dim:
]
else:
if "weight" in key:
snake_case_ : Optional[Any] = val[
:dim, :
]
snake_case_ : List[str] = val[
dim : dim * 2, :
]
snake_case_ : str = val[
-dim:, :
]
else:
snake_case_ : List[Any] = val[:dim]
snake_case_ : List[Any] = val[
dim : dim * 2
]
snake_case_ : Any = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ : Tuple = key_split[2]
snake_case_ : Any = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ : Dict = val[:dim, :]
snake_case_ : str = val[dim : dim * 2, :]
snake_case_ : Optional[int] = val[-dim:, :]
else:
snake_case_ : int = val[:dim]
snake_case_ : Optional[Any] = val[dim : dim * 2]
snake_case_ : int = val[-dim:]
else:
snake_case_ : Any = key_split[2]
snake_case_ : int = config.text_config.hidden_size
if "weight" in key:
snake_case_ : List[str] = val[:dim, :]
snake_case_ : List[str] = val[
dim : dim * 2, :
]
snake_case_ : Tuple = val[-dim:, :]
else:
snake_case_ : Tuple = val[:dim]
snake_case_ : int = val[
dim : dim * 2
]
snake_case_ : Tuple = val[-dim:]
else:
snake_case_ : str = rename_key(__UpperCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ : Tuple = val.T
snake_case_ : Any = val
return orig_state_dict
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
if num_frames == 8:
snake_case_ : Optional[int] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 1_6:
snake_case_ : Tuple = """eating_spaghetti.npy"""
elif num_frames == 3_2:
snake_case_ : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
snake_case_ : str = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=__UpperCamelCase , repo_type="""dataset""" , )
snake_case_ : str = np.load(__UpperCamelCase )
return list(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=False ):
'''simple docstring'''
snake_case_ : int = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
snake_case_ : Optional[int] = model_to_url[model_name]
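    # frame count: 8 by default, 16 for the "16-frames" variants, 32 for the few-shot checkpoints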
snake_case_ : str = 8
if "16-frames" in model_name:
snake_case_ : str = 1_6
elif "shot" in model_name:
snake_case_ : Optional[int] = 3_2
snake_case_ : Optional[Any] = get_xclip_config(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = XCLIPModel(__UpperCamelCase )
model.eval()
if "drive" in checkpoint_url:
snake_case_ : Any = """pytorch_model.bin"""
gdown.cached_download(__UpperCamelCase , __UpperCamelCase , quiet=__UpperCamelCase )
snake_case_ : List[Any] = torch.load(__UpperCamelCase , map_location="""cpu""" )["""model"""]
else:
snake_case_ : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase )["""model"""]
snake_case_ : int = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
snake_case_ : int = XCLIPModel(__UpperCamelCase )
snake_case_ , snake_case_ : int = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case_ : Union[str, Any] = 3_3_6 if model_name == """xclip-large-patch14-16-frames""" else 2_2_4
snake_case_ : List[Any] = VideoMAEImageProcessor(size=__UpperCamelCase )
snake_case_ : List[str] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ : Tuple = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ : List[str] = XCLIPProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
snake_case_ : List[str] = prepare_video(__UpperCamelCase )
snake_case_ : List[str] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
snake_case_ : str = model(**__UpperCamelCase )
# Verify outputs
snake_case_ : Optional[Any] = outputs.logits_per_video
snake_case_ : List[Any] = logits_per_video.softmax(dim=1 )
print("""Probs:""" , __UpperCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ : Optional[Any] = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ : List[str] = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
snake_case_ : int = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ : Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
snake_case_ : int = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ : str = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ : Optional[Any] = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ : Optional[Any] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ : Dict = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ : List[str] = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ : Optional[int] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ : str = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ : Optional[Any] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ : Optional[int] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ : List[str] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ : Tuple = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ : List[Any] = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ : Union[str, Any] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(F'Model name {model_name} not supported' )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
processor.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
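        # deeper keys get a longer dotted prefix; the shrinking field width keeps names aligned in one column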
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
            # Insert a 1 x 1 x n_positions x n_positions lower-triangular causal mask as the attention "bias" buffer.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
    # Sanity check: every transformer layer should have been converted.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
    # Extract the checkpoint's directory.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
    # the .zip wrapper is optional; keep supporting it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
            vocab_size=5_0_2_5_7 ,
            n_positions=1_0_2_4 ,
            n_embd=1_0_2_4 ,
            n_layer=2_4 ,
            n_head=1_6 ,
            n_inner=4_0_9_6 ,
            activation_function=__UpperCamelCase ,
            resid_pdrop=0.1 ,
            embd_pdrop=0.1 ,
            attn_pdrop=0.1 ,
            layer_norm_epsilon=1E-5 ,
            initializer_range=0.02 ,
            summary_type="""cls_index""" ,
            summary_use_proj=__UpperCamelCase ,
            summary_activation=__UpperCamelCase ,
            summary_proj_to_labels=__UpperCamelCase ,
            summary_first_dropout=0.1 ,
            scale_attn_weights=__UpperCamelCase ,
            use_cache=__UpperCamelCase ,
            bos_token_id=5_0_2_5_6 ,
            eos_token_id=5_0_2_5_6 ,
        )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 526 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__( lowerCamelCase__ ):
lowercase__ = """naver-clova-ix/donut-base-finetuned-docvqa"""
lowercase__ = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowercase__ = """document_qa"""
lowercase__ = AutoProcessor
lowercase__ = VisionEncoderDecoderModel
lowercase__ = ["""image""", """text"""]
lowercase__ = ["""text"""]
def __init__( self : str , *__snake_case : Any , **__snake_case : List[Any] ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : "Image" , __snake_case : str ):
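        # Donut-style models receive the task as a decoder prompt; the user's question is substituted into the <s_question> slot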
a : Union[str, Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
a : Any = task_prompt.replace('{user_input}' , __snake_case )
a : Dict = self.pre_processor.tokenizer(
__snake_case , add_special_tokens=__snake_case , return_tensors='pt' ).input_ids
a : Tuple = self.pre_processor(__snake_case , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowercase_ ( self : Any , __snake_case : int ):
return self.model.generate(
            inputs['pixel_values'].to(self.device ) ,
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=__snake_case ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=__snake_case ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=__snake_case ,
        ).sequences
def lowercase_ ( self : List[str] , __snake_case : Tuple ):
a : str = self.pre_processor.batch_decode(__snake_case )[0]
a : str = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
a : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
a : List[str] = re.sub(r'<.*?>' , '' , __snake_case , count=1 ).strip() # remove first task start token
a : Dict = self.pre_processor.tokenajson(__snake_case )
return sequence["answer"] | 526 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = torch.device("""cpu""")
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : List[str] = dct.pop(_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = val
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Dict = []
for k in state_dict.keys():
lowerCamelCase__ : Tuple = k
if ".pwconv" in k:
lowerCamelCase__ : Union[str, Any] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
lowerCamelCase__ : Any = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
lowerCamelCase__ : int = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
lowerCamelCase__ : Optional[int] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
lowerCamelCase__ : Any = k_new.split('.' )
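            # a numeric sub-index addresses a block inside a stage; other network entries are renamed as a whole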
if ls[2].isdigit():
lowerCamelCase__ : Optional[Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
lowerCamelCase__ : Tuple = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ : str = SwiftFormerConfig()
    # ImageNet-1k label mapping (1000 classes)
lowerCamelCase__ : Dict = 1000
lowerCamelCase__ : Union[str, Any] = 'huggingface/label-files'
lowerCamelCase__ : List[str] = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Optional[int] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : List[str] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowerCamelCase__ : Dict = [3, 3, 6, 4]
lowerCamelCase__ : List[str] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
lowerCamelCase__ : Tuple = [3, 3, 9, 6]
lowerCamelCase__ : Dict = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
lowerCamelCase__ : Optional[int] = [4, 3, 10, 5]
lowerCamelCase__ : Any = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
lowerCamelCase__ : Optional[Any] = [4, 4, 12, 6]
lowerCamelCase__ : Dict = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' , check_hash=_UpperCAmelCase )
else:
lowerCamelCase__ : Dict = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : Optional[int] = checkpoint
lowerCamelCase__ : int = create_rename_keys(_UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# load HuggingFace model
lowerCamelCase__ : Optional[Any] = SwiftFormerForImageClassification(_UpperCAmelCase ).eval()
hf_model.load_state_dict(_UpperCAmelCase )
# prepare test inputs
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Dict = ViTImageProcessor.from_pretrained('preprocessor_config' )
lowerCamelCase__ : Any = processor(images=_UpperCAmelCase , return_tensors='pt' )
# compare outputs from both models
lowerCamelCase__ : Optional[int] = get_expected_output(_UpperCAmelCase )
lowerCamelCase__ : Any = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , _UpperCAmelCase , atol=1e-3 )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
_UpperCAmelCase : str = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 707 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : str=32 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Dict=10 , UpperCAmelCase : List[str]=[10, 20, 30, 40] , UpperCAmelCase : Any=[1, 1, 2, 1] , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict="relu" , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Dict=None , ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : int = embeddings_size
lowerCamelCase__ : str = hidden_sizes
lowerCamelCase__ : Any = depths
lowerCamelCase__ : str = is_training
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Dict = num_labels
lowerCamelCase__ : Dict = scope
lowerCamelCase__ : List[str] = len(UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : str = self.get_config()
return config, pixel_values
def A_ ( self : Optional[int] ) -> Dict:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A_ ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> Optional[Any]:
lowerCamelCase__ : Optional[Any] = FlaxRegNetModel(config=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A_ ( self : str , UpperCAmelCase : int , UpperCAmelCase : Tuple ) -> Tuple:
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : str = FlaxRegNetForImageClassification(config=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Dict ) -> str:
lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
lowerCamelCase__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : List[Any] ) -> None:
lowerCamelCase__ : List[Any] = FlaxRegNetModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def A_ ( self : int ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : Any ) -> str:
return
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : Dict ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def A_ ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def A_ ( self : Any ) -> Tuple:
pass
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A_ ( self : int ) -> List[str]:
def check_hidden_states_output(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ):
lowerCamelCase__ : Any = model_class(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Dict ) -> int:
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ):
return model(pixel_values=UpperCAmelCase , **UpperCAmelCase )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ : Dict = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ : Optional[int] = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A_ ( self : Any ) -> List[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def A_ ( self : Dict ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=UpperCAmelCase , return_tensors='np' )
lowerCamelCase__ : List[Any] = model(**UpperCAmelCase )
# verify the logits
lowerCamelCase__ : Union[str, Any] = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCamelCase__ : Tuple = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
| 188 | 0 |
import math
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[Any] = input('Enter message: ' )
A_ : List[Any] = int(input(f'''Enter key [2-{len(lowercase_ ) - 1}]: ''' ) )
A_ : Tuple = input('Encryption/Decryption [e/d]: ' )
if mode.lower().startswith('e' ):
A_ : List[str] = encrypt_message(lowercase_ ,lowercase_ )
elif mode.lower().startswith('d' ):
A_ : Any = decrypt_message(lowercase_ ,lowercase_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f'''Output:\n{text + '|'}''' )
def UpperCamelCase ( __lowercase : int ,__lowercase : str ):
'''simple docstring'''
A_ : Optional[int] = [''] * key
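    # column `col` of the ciphertext collects every key-th character of the message, starting at offset col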
for col in range(lowercase_ ):
A_ : str = col
while pointer < len(lowercase_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowercase_ )
def UpperCamelCase ( __lowercase : int ,__lowercase : str ):
'''simple docstring'''
A_ : int = math.ceil(len(lowercase_ ) / key )
A_ : Any = key
A_ : Optional[int] = (num_cols * num_rows) - len(lowercase_ )
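    # number of grid cells that stay empty because the message does not fill the grid exactly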
A_ : str = [''] * num_cols
A_ : List[str] = 0
A_ : Any = 0
for symbol in message:
plain_text[col] += symbol
col += 1
        if (col == num_cols) or (
            (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes)
        ):
A_ : List[str] = 0
row += 1
return "".join(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 558 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'encodec'
def __init__( self , snake_case_=[1.5, 3.0, 6.0, 12.0, 24.0] , snake_case_=2_40_00 , snake_case_=1 , snake_case_=False , snake_case_=None , snake_case_=None , snake_case_=1_28 , snake_case_=32 , snake_case_=1 , snake_case_=[8, 5, 4, 2] , snake_case_="weight_norm" , snake_case_=7 , snake_case_=7 , snake_case_=3 , snake_case_=2 , snake_case_=True , snake_case_="reflect" , snake_case_=2 , snake_case_=2 , snake_case_=1.0 , snake_case_=10_24 , snake_case_=None , snake_case_=True , **snake_case_ , ):
lowercase =target_bandwidths
lowercase =sampling_rate
lowercase =audio_channels
lowercase =normalize
lowercase =chunk_length_s
lowercase =overlap
lowercase =hidden_size
lowercase =num_filters
lowercase =num_residual_layers
lowercase =upsampling_ratios
lowercase =norm_type
lowercase =kernel_size
lowercase =last_kernel_size
lowercase =residual_kernel_size
lowercase =dilation_growth_rate
lowercase =use_causal_conv
lowercase =pad_mode
lowercase =compress
lowercase =num_lstm_layers
lowercase =trim_right_ratio
lowercase =codebook_size
lowercase =codebook_dim if codebook_dim is not None else hidden_size
lowercase =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**snake_case_ )
@property
def _A( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _A( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _A( self ):
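        # the encoder's hop length is the product of the upsampling ratios, which sets the frame rate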
lowercase =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _A( self ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 72 | 0 |
"""simple docstring"""
def lowerCamelCase_( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
_lowerCAmelCase : str = generate_large_matrix()
_lowerCAmelCase : Union[str, Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
assert all(row == sorted(_lowerCamelCase , reverse=_lowerCamelCase ) for row in grid )
assert all(list(_lowerCamelCase ) == sorted(_lowerCamelCase , reverse=_lowerCamelCase ) for col in zip(*_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
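    # Binary search for the index of the first negative value in a row sorted in decreasing order,
    # e.g. find_negative_index([4, 3, 2, -1]) == 3 and find_negative_index([-1, -2]) == 0.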
_lowerCamelCase : str = 0
_lowerCamelCase : str = len(_lowerCamelCase ) - 1
    # Edge cases: the row is empty, or every number is negative (the first negative index is 0).
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowerCamelCase : int = (left + right) // 2
_lowerCamelCase : Optional[int] = array[mid]
        # mid is the first negative index when array[mid] is negative and its predecessor is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowerCamelCase : str = mid + 1
else:
_lowerCamelCase : int = mid - 1
    # No negative numbers were found, so return len(array) (one past the last index).
return len(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = len(grid[0] )
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(_lowerCamelCase ) * len(grid[0] )) - total
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = 0
for row in grid:
for i, number in enumerate(_lowerCamelCase ):
if number < 0:
total += len(_lowerCamelCase ) - i
break
return total
def lowerCamelCase_( ) -> None:
'''simple docstring'''
from timeit import timeit
print("Running benchmarks" )
_lowerCamelCase : List[Any] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowerCamelCase : Tuple = timeit(F"""{func}(grid=grid)""" , setup=_lowerCamelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 708 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MvpTokenizer
lowerCAmelCase__ = MvpTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = filter_roberta_detectors
def _lowercase ( self: Tuple ):
'''simple docstring'''
super().setUp()
_lowerCamelCase : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCamelCase : Optional[int] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : str = {"unk_token": "<unk>"}
_lowerCamelCase : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCAmelCase ) )
def _lowercase ( self: int ,**__lowerCAmelCase: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: str ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer( self: Any ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
    def default_tokenizer_fast( self: Any ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
    def test_prepare_batch( self: Optional[Any] ):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text ,max_length=len(expected_src_tokens ) ,padding=True ,return_tensors="pt" )
            self.assertIsInstance(batch ,BatchEncoding )
            self.assertEqual((2, 9) ,batch.input_ids.shape )
            self.assertEqual((2, 9) ,batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens ,result )
# Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text( self: List[Any] ):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text ,padding=True ,return_tensors="pt" )
            # check if input_ids are returned and no labels
            self.assertIn("input_ids" ,batch )
            self.assertIn("attention_mask" ,batch )
            self.assertNotIn("labels" ,batch )
            self.assertNotIn("decoder_attention_mask" ,batch )
@require_torch
    def test_tokenizer_as_target_length( self: Optional[int] ):
        '''simple docstring'''
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
            self.assertEqual(32 ,targets["input_ids"].shape[1] )
@require_torch
    def test_prepare_batch_not_longer_than_maxlen( self: List[Any] ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"] ,padding=True ,truncation=True ,return_tensors="pt" )
            self.assertIsInstance(batch ,BatchEncoding )
            self.assertEqual(batch.input_ids.shape ,(2, 1_024) )
@require_torch
    def test_special_tokens( self: Optional[int] ):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text ,text_target=tgt_text ,return_tensors="pt" )
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def test_pretokenized_inputs( self: Optional[Any] ):
'''simple docstring'''
pass
    def test_embeded_special_tokens( self: Optional[int] ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence ,add_special_tokens=True ,return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence ,add_special_tokens=True ,return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) | 386 | 0 |
"""simple docstring"""
def reverse_long_words( sentence ):
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
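    # A quick check of the expected behavior (a sketch): words longer than four
    # characters are reversed, shorter ones are left untouched.
    assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"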
| 589 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
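# A sketch of how the lazy-import pattern above behaves (hypothetical usage;
# the heavy modeling module is only imported when an attribute is accessed):
#
#     from transformers.models import informer
#     model_cls = informer.InformerModel  # modeling_informer is imported here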
| 589 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ):
    """simple docstring"""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f"""{src_lang}-{tgt_lang}"""
    print(f"""Converting {dataset}-{pair}""" )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f"""{dataset}-{pair}"""
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f"""Splitting {split} with {ds[split].num_rows} records""" )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f"""{fn}.source""" )
        tgt_path = save_dir.joinpath(f"""{fn}.target""" )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
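# Typical CLI invocation via python-fire (a sketch; the script filename is
# hypothetical):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en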
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue :
def __init__( self : Optional[int] ) -> int:
        self.elements = []
        self.set = set()
    def minkey( self : Optional[Any] ) -> List[Any]:
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )
    def empty( self : Dict ) -> Optional[int]:
        return len(self.elements ) == 0
    def put( self : Optional[int] , item : Optional[Any] , priority : Dict ) -> Optional[Any]:
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri , x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self : int , item : Any ) -> int:
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro , x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self : List[str] ) -> Dict:
        return self.elements[0][1]
    def get( self : Any ) -> List[str]:
        (priority , item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
def consistent_heuristic( p : TPos , goal : TPos ):
    """simple docstring"""
    # euclidean distance
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_2( p : TPos , goal : TPos ):
    """simple docstring"""
    # integer division by time variable
    return consistent_heuristic(p , goal ) // t
def heuristic_1( p : TPos , goal : TPos ):
    """simple docstring"""
    # manhattan distance
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key( start : TPos , i : int , goal : TPos , g_function : dict[TPos, float] ):
    """simple docstring"""
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
def do_something( back_pointer : List[Any] , goal : int , start : Optional[int] ):
    """simple docstring"""
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''
    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c , y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=''' ''' )
                print('''<-- End position''' , end=''' ''' )
            else:
                print(grid[i][j] , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
sys.exit()
def valid( p : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state( s : Optional[Any] , j : int , visited : List[Any] , g_function : Dict , close_list_anchor : Dict , close_list_inad : Dict , open_list : List[Any] , back_pointer : Tuple , ):
    """simple docstring"""
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x , y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )
            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                if neighbours not in close_list_inad:
                    for var in range(1 , n_heuristic ):
                        if key(neighbours , var , goal , g_function ) <= W2 * key(
                            neighbours , 0 , goal , g_function ):
                            open_list[j].put(
                                neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground( ):
    """simple docstring"""
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star( start : TPos , goal : TPos , n_heuristic : int ):
    """simple docstring"""
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    _ , get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
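# A quick illustration of the anchor priority above (a sketch): with W1 == 1,
# key(start, 0, goal, {start: 0, ...}) is just the euclidean distance from
# (0, 0) to (19, 19), i.e. about 26.87; the two inadmissible searches reuse
# this value scaled by W2 in the queue-selection test.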
| 654 | 1 |
def simplify( current_set ) -> list[list]:
    '''simple docstring'''
    # Divide each row by the magnitude of its first term --> creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous( equations ) -> list:
    '''simple docstring'''
    if len(equations ) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError('''solve_simultaneous() requires lists of integers''' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
        data_set.insert(0 , full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
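    # For reference (a sketch of the expected output): the 5x5 system above has
    # the unique solution [-1.0, 0.0, 1.0, 2.0, 3.0], and the single equation
    # 4x = 2 solves to [0.5].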
| 130 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class A_ (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
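# A small usage sketch (hedged: needs the tokenizers dependency and network
# access to fetch "xlnet-base-cased"); XLNet appends <sep> and <cls> at the
# END of the sequence, unlike BERT:
#
#     tok = A_.from_pretrained("xlnet-base-cased")
#     tok.build_inputs_with_special_tokens([10, 11])
#     # -> [10, 11, tok.sep_token_id, tok.cls_token_id]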
| 130 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest (FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest (unittest.TestCase ):
    @slow
    def test_inference_no_head( self):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4)) | 338 |
'''simple docstring'''
import sys
import turtle
def get_mid( p1: tuple[float, float] , p2: tuple[float, float] ) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle( vertex1: tuple[float, float] , vertex2: tuple[float, float] , vertex3: tuple[float, float] , depth: int , ) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0] , vertex1[1] )
    my_pen.down()
    my_pen.goto(vertex2[0] , vertex2[1] )
    my_pen.goto(vertex3[0] , vertex3[1] )
    my_pen.goto(vertex1[0] , vertex1[1] )
    if depth == 0:
        return
    triangle(vertex1 , get_mid(vertex1 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
    triangle(vertex2 , get_mid(vertex1 , vertex2 ) , get_mid(vertex2 , vertex3 ) , depth - 1 )
    triangle(vertex3 , get_mid(vertex3 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1])) | 338 | 1 |
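# The recursion above produces a Sierpinski triangle: each call draws the
# current triangle and recurses on three half-scale copies anchored at its
# corners, so depth d yields 3**d smallest triangles. For example,
# get_mid((-175, -125), (0, 175)) == (-87.5, 25.0).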
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    '''simple docstring'''
    url = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config( model_name ):
    '''simple docstring'''
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
    else:
        raise ValueError('''Model name not supported''' )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    qformer_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
    config , image_size = get_blipa_config(model_name )
    hf_model = InstructBlipForConditionalGeneration(config ).eval()
    model_name_to_original = {
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }
    name , type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    lavis_device = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    hf_model_device = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=lavis_device )
    original_model.eval()
    print('''Done!''' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "llm_proj" in key:
            key = key.replace('''llm_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''llm_model''' ):
            key = key.replace('''llm_model''' , '''language_model''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict , strict=True )
    image = load_demo_image()
    prompt = '''What is unusual about this image?'''
    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = InstructBlipProcessor(
        image_processor=image_processor , tokenizer=tokenizer , qformer_tokenizer=qformer_tokenizer , )
    inputs = processor(images=image , text=prompt , return_tensors='''pt''' ).to(hf_model_device )
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(lavis_device )
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , pixel_values )
    original_model.to(lavis_device )
    hf_model.to(hf_model_device )
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
            logits = hf_model(**inputs ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
            label_input_ids = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(hf_model_device )
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(**inputs , labels=labels ).logits
    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if '''vicuna''' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , logits , atol=atol )
    print('''Looks ok!''' )
    print('''Generating with original model...''' )
    original_outputs = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print('''Generating with HF model...''' )
    outputs = hf_model.generate(
        **inputs , do_sample=False , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('''Original generation:''' , original_outputs )
    output_text = processor.batch_decode(outputs , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F"""Salesforce/{model_name}""" )
        hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
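# Typical invocation of the conversion script above (a sketch; the script
# filename is hypothetical):
#
#     python convert_instructblip_original_to_pytorch.py \
#         --model_name instructblip-flan-t5-xl \
#         --pytorch_dump_folder_path ./instructblip-flan-t5-xl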
| 183 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self : Union[str, Any], **kwargs : Tuple ):
        '''simple docstring'''
        config = {'''num_train_timesteps''': 1_000}
        config.update(**kwargs )
        return config
    def check_over_configs( self : Any, time_step : Any=0, **config : List[str] ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''', None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : str ):
'''simple docstring'''
pass
    def check_over_forward( self : Dict, time_step : Optional[int]=0, **forward_kwargs : Dict ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''', None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self : int, **config : List[str] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        return sample
    def test_step_shape( self : List[Any] ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''', None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler, '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs ).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs ).prev_sample
            self.assertEqual(output_0.shape, sample.shape )
            self.assertEqual(output_0.shape, output_1.shape )
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs ).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs ).prev_sample
            self.assertEqual(output_0.shape, sample.shape )
            self.assertEqual(output_0.shape, output_1.shape )
    def test_timesteps( self : Dict ):
        '''simple docstring'''
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None )
    def test_inference_steps( self : str ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None )
    def test_full_loop_no_noise( self : List[str] ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 183 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin :
    def _get_dummy_components( self ) -> Any:
        """simple docstring"""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                """ResnetDownsampleBlock2D""",
                """SimpleCrossAttnDownBlock2D""",
            ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components( self ) -> int:
        """simple docstring"""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                """ResnetDownsampleBlock2D""",
                """SimpleCrossAttnDownBlock2D""",
            ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components( self ) -> Tuple:
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["""prompt"""]
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        if "image" in inputs:
            image = inputs["""image"""]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["""mask_image"""]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["""original_image"""]
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
    def _test_save_load_local( self ) -> int:
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
| 378 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig ( PretrainedConfig ):
    model_type = 'big_bird'
    def __init__( self , vocab_size=5_03_58 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40_96 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=66 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=64 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ) -> List[str]:
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class UpperCamelCase_ ( a_ ):
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
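# Illustrative usage (an editor-added sketch, not part of the original module):
# exercises only the constructor defined above. attention_type="original_full"
# selects standard quadratic attention, while the default "block_sparse" uses
# the block/random-block parameters also defined above.
if __name__ == "__main__":
    config = BigBirdConfig(attention_type="original_full")
    print(config.model_type, config.attention_type, config.vocab_size)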
| 378 | 1 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Returns True if `input_string` fully matches `pattern`, where the pattern
    may contain "." (any single character) and "*" (zero or more of the
    preceding element).
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
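# Editor's note (sketch): the table fill above costs O(len(input_string) * len(pattern))
# time and space. For patterns built only from literals, "." and "*", it agrees with
# Python's own engine:
#
#     import re
#     assert match_pattern("aab", "c*a*b") == bool(re.fullmatch("c*a*b", "aab"))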
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}')
| 185 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(result))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
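# Editor's note (sketch): `estimator` above evaluates the unbiased estimate
#
#     pass@k = 1 - C(n - c, k) / C(n, k)
#
# through the numerically stable product 1 - prod_{i = n-c+1}^{n} (1 - k / i).
# Quick check with n=2 samples, c=1 correct, k=1: 1 - C(1, 1) / C(2, 1) = 0.5,
# i.e. estimate_pass_at_k(np.array([2]), np.array([1]), 1) -> array([0.5]).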
| 185 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
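# Editor's note (sketch): brute force is feasible here because the key space is
# tiny: three lowercase letters give 26**3 = 17_576 candidates, and try_key
# rejects a key as soon as a single XOR-decoded byte falls outside VALID_INTS.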
if __name__ == "__main__":
    print(f"{solution() = }")
| 178 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 178 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
        """.split()

        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
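    # Editor's note (sketch): patching sys.argv makes xla_spawn.main() parse the
    # test arguments exactly as `python xla_spawn.py --num_cores=8 <script> ...`
    # would on the command line, spawning one worker process per TPU core.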
| 588 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 588 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
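# Editor's note (sketch): the state-space tree is pruned on two conditions:
# the running sum already exceeds max_sum, or even adding every remaining
# number cannot reach it. For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the
# script prints the two solutions [3, 4, 2] and [4, 5].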
| 369 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 369 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
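    # Editor's note (sketch): the check above verifies incremental decoding:
    # `init_cache` pre-allocates a cache of `max_decoder_length` positions, the
    # sequence is decoded once in full and once as prefix + final token through
    # the cache, and the two sets of logits must agree within 1e-3.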
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 345 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
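# Illustrative usage (an editor-added sketch, not part of the original module):
# only the constructor defined above is exercised; `out_features` selects which
# stages a backbone consumer will read.
if __name__ == "__main__":
    config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage4"])
    print(config.model_type, config.stage_names)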
| 362 | 0 |
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
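# Editor's note (sketch): the magic constants in atbash_slow come from ord sums:
# ord("A") + ord("Z") = 65 + 90 = 155 and ord("a") + ord("z") = 97 + 122 = 219,
# so chr(155 - ord(c)) mirrors an uppercase letter and chr(219 - ord(c)) a
# lowercase one, e.g. atbash("ABC") == atbash_slow("ABC") == "ZYX".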
def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 211 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
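    # Editor's note (sketch): with no arguments, MaskFormerConfig() falls back to
    # the Swin backbone and DETR decoder defaults wired in above; explicit pairs
    # can be combined via
    #     MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())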
| 211 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class snake_case ( nn.Module ):
    def __init__( self :Any , dim :int , dim_out :Optional[int] = None , mult :int = 4 , dropout :float = 0.0 , activation_fn :str = "geglu" , final_dropout :bool = False , ):
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate='''tanh''' )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward( self :Union[str, Any] , hidden_states :Dict ):
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
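# --- Illustrative sketch (not part of the original file): the GEGLU feed-forward
# pattern the block above builds, rebuilt against plain torch. The class name,
# the 4x `mult` expansion and the shapes below are assumptions taken from the
# constructor defaults, not definitive upstream behaviour.
import torch
import torch.nn as nn
import torch.nn.functional as F

class GEGLUFeedForwardSketch(nn.Module):
    def __init__(self, dim: int, mult: int = 4, dropout: float = 0.0):
        super().__init__()
        inner = dim * mult
        self.proj_in = nn.Linear(dim, inner * 2)  # value and gate halves in one matmul
        self.drop = nn.Dropout(dropout)
        self.proj_out = nn.Linear(inner, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        value, gate = self.proj_in(x).chunk(2, dim=-1)  # split into value/gate
        return self.proj_out(self.drop(value * F.gelu(gate)))

# hypothetical shapes: batch 2, sequence 16, width 64
assert GEGLUFeedForwardSketch(64)(torch.randn(2, 16, 64)).shape == (2, 16, 64)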
class snake_case ( nn.Module ):
    def __init__( self :Any , dim_in :int , dim_out :int , approximate :str = "none" ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu( self :Tuple , gate :List[str] ):
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward( self :int , hidden_states :Optional[Any] ):
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class snake_case ( nn.Module ):
    def __init__( self :Any , dim_in :int , dim_out :int ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu( self :List[str] , gate :str ):
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward( self :str , hidden_states :List[str] ):
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class snake_case ( nn.Module ):
    def __init__( self :Any , dim_in :int , dim_out :int ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward( self :List[Any] , x :Union[str, Any] ):
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )
class snake_case ( nn.Module ):
    def __init__( self :str , num_embeddings :Optional[int] , embedding_dim :str ):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward( self :List[Any] , x :Tuple , timestep :Union[str, Any] ):
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
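# --- Illustrative sketch (not part of the original file): the adaptive layer-norm
# modulation above is a per-sample affine transform of normalized activations.
# All names below are local to this toy check.
import torch
import torch.nn as nn
import torch.nn.functional as F

emb = nn.Embedding(10, 8)           # timestep index -> embedding
to_affine = nn.Linear(8, 2 * 4)     # embedding -> (scale, shift) for width-4 features
ln = nn.LayerNorm(4, elementwise_affine=False)
x = torch.randn(2, 4)
scale, shift = to_affine(F.silu(emb(torch.tensor([1, 3])))).chunk(2, dim=-1)
x = ln(x) * (1 + scale) + shift     # the same modulation as the forward above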
class snake_case ( nn.Module ):
    def __init__( self :Optional[int] , embedding_dim :int , num_embeddings :List[str] ):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1e-6 )
    def forward( self :Tuple , x :str , timestep :List[str] , class_labels :Dict , hidden_dtype :Dict=None ):
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class snake_case ( nn.Module ):
    def __init__( self :List[Any] , embedding_dim :int , out_dim :int , num_groups :int , act_fn :Optional[str] = None , eps :float = 1e-5 ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward( self :int , x :Dict , emb :Any ):
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x
| 674 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCamelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCamelCase = {
'''t5-small''': 5_12,
'''t5-base''': 5_12,
'''t5-large''': 5_12,
'''t5-3b''': 5_12,
'''t5-11b''': 5_12,
}
_lowerCamelCase = '''▁'''
class snake_case ( __UpperCAmelCase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :int , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Union[str, Any]="</s>" , _lowerCamelCase :List[Any]="<unk>" , _lowerCamelCase :Union[str, Any]="<pad>" , _lowerCamelCase :int=1_0_0 , _lowerCamelCase :Union[str, Any]=None , _lowerCamelCase :Optional[Dict[str, Any]] = None , _lowerCamelCase :int=True , **_lowerCamelCase :List[Any] , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id''' in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , extra_ids=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_lowerCamelCase , **_lowerCamelCase , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( _lowerCamelCase :str , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :int ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _lowerCamelCase , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None , _lowerCamelCase :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_lowerCamelCase )) + [1]
return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1]
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
return list(
set(filter(lambda _lowerCamelCase : bool(re.search(r'''<extra_id_\d+>''' , _lowerCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
return [self._convert_token_to_id(_lowerCamelCase ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE_ ( self :Any , _lowerCamelCase :List[int] ):
if len(_lowerCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ):
        eos = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE_ ( self :List[Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self._add_eos_if_not_present(_lowerCamelCase )
if token_ids_a is None:
return token_ids_a
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_eos_if_not_present(_lowerCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self :Union[str, Any] ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
def __setstate__( self :Optional[Any] , _lowerCamelCase :List[str] ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :"TextInput" , **_lowerCamelCase :str ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , ''' ''' )
        return super().tokenize(text , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :List[Any] , **_lowerCamelCase :Dict ):
if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :Optional[Any] ):
if token.startswith('''<extra_id_''' ):
            match = re.match(r'''<extra_id_(\d+)>''' , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
def SCREAMING_SNAKE_CASE_ ( self :str , _lowerCamelCase :Optional[int] ):
if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def SCREAMING_SNAKE_CASE_ ( self :Tuple , _lowerCamelCase :Any ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :str , _lowerCamelCase :Optional[str] = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
        out_vocab_file = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
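# --- Illustrative sketch (not part of the original file): the sentinel-token scheme
# above, exercised through the upstream transformers API that this file mirrors
# (downloading the t5-small checkpoint is assumed to be possible).
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
# sentinel ids count down from the top of the vocabulary, as in _convert_token_to_id
assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1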
| 674 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Any = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
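# --- Illustrative sketch (not part of the original file): a minimal standalone
# version of the lazy-import idea used by _LazyModule above (a toy
# re-implementation, not the transformers helper itself).
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # resolve the attribute on first access instead of at import time
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)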
| 545 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowercase_ ( _snake_case ):
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowercase_ ( _snake_case ):
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def lowercase_ ( _snake_case ):
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def lowercase_ ( _snake_case ,_snake_case ):
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"""surface_area_conical_frustum() only accepts non-negative values""" )
    slant_height = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowercase_ ( _snake_case ,_snake_case ):
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def lowercase_ ( _snake_case ,_snake_case ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
return 4 * pow(_snake_case ,2 ) * torus_radius * tube_radius
def lowercase_ ( _snake_case ,_snake_case ):
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def lowercase_ ( _snake_case ):
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def lowercase_ ( _snake_case ,_snake_case ):
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("""Given three sides do not form a triangle""" )
    semi_perimeter = (sidea + sidea + sidea) / 2
    area = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
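# --- Illustrative check (not part of the original file): Heron's formula above on
# the 3-4-5 right triangle; self-contained so it does not depend on the names here.
from math import sqrt
s = (3 + 4 + 5) / 2                                   # semi-perimeter = 6
assert sqrt(s * (s - 3) * (s - 4) * (s - 5)) == 6.0   # sqrt(6 * 3 * 2 * 1)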
def lowercase_ ( _snake_case ,_snake_case ):
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("""area_trapezium() only accepts non-negative values""" )
return 1 / 2 * (basea + basea) * height
def lowercase_ ( _snake_case ):
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def lowercase_ ( _snake_case ,_snake_case ):
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def lowercase_ ( _snake_case ,_snake_case ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("""area_rhombus() only accepts non-negative values""" )
return 1 / 2 * diagonal_a * diagonal_a
def lowercase_ ( _snake_case ,_snake_case ):
if not isinstance(_snake_case ,_snake_case ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(f"""Square: {area_square(1_0) = }""")
print(f"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(f"""Circle: {area_circle(2_0) = }""")
print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(2_0) = }""")
print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(f"""Sphere: {surface_area_sphere(2_0) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(f"""Square: {area_reg_polygon(4, 1_0) = }""")
    print(f"""Regular Pentagon: {area_reg_polygon(5, 1_0) = }""")
| 545 | 1 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
    with contextlib.closing(sqlite3.connect(_SCREAMING_SNAKE_CASE ) ) as con:
_UpperCAmelCase = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''tmp.sql''' )
_UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(_SCREAMING_SNAKE_CASE , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
_UpperCAmelCase = iter_sql_file(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = iter_sql_file(_SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''tmp.sql''' )
_UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(_SCREAMING_SNAKE_CASE , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
_UpperCAmelCase = iter_sql_file(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = iter_sql_file(_SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''tmp.sql''' )
_UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE ).read()
with pytest.raises(_SCREAMING_SNAKE_CASE ):
SqlDatasetWriter(_SCREAMING_SNAKE_CASE , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
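# --- Illustrative sketch (not part of the original file): the round trip these
# tests exercise, via the public datasets API (sqlalchemy and write access to the
# working directory are assumed).
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_sql("dataset", "sqlite:///tmp.db")               # write out
ds2 = Dataset.from_sql("dataset", "sqlite:///tmp.db")  # read back
assert ds2.column_names == ["col_1", "col_2"]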
| 602 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """vivit"""
def __init__( self : int , __UpperCamelCase : Union[str, Any]=2_2_4 , __UpperCamelCase : Any=3_2 , __UpperCamelCase : str=[2, 1_6, 1_6] , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : int=7_6_8 , __UpperCamelCase : List[str]=1_2 , __UpperCamelCase : str=1_2 , __UpperCamelCase : Optional[Any]=3_0_7_2 , __UpperCamelCase : Optional[Any]="gelu_fast" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : List[str]=1e-06 , __UpperCamelCase : Union[str, Any]=True , **__UpperCamelCase : Any , )->Optional[int]:
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
super().__init__(**__UpperCamelCase )
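# --- Illustrative check (not part of the original file): instantiating the
# upstream config this file corresponds to; defaults mirror ViViT-B/16x2.
from transformers import VivitConfig

config = VivitConfig()
assert config.hidden_size == 768 and config.num_frames == 32
assert config.tubelet_size == [2, 16, 16]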
| 602 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase_ : Tuple = logging.get_logger(__name__)
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
A__ = ["""input_features"""]
def __init__( self , snake_case__=80 , snake_case__=16000 , snake_case__=160 , snake_case__=30 , snake_case__=400 , snake_case__=0.0 , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
super().__init__(
feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case__ , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=snake_case__ , norm="slaney" , mel_scale="slaney" , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
        log_spec = spectrogram(
            snake_case__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
_SCREAMING_SNAKE_CASE : int = np.array(snake_case__ , np.intaa )
_SCREAMING_SNAKE_CASE : str = []
for vector, length in zip(snake_case__ , attention_mask.sum(-1 ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_SCREAMING_SNAKE_CASE : Optional[Any] = padding_value
normed_input_values.append(snake_case__ )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self , snake_case__ , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = "max_length" , snake_case__ = None , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_SCREAMING_SNAKE_CASE : int = isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_SCREAMING_SNAKE_CASE : List[Any] = is_batched_numpy or (
isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_SCREAMING_SNAKE_CASE : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(snake_case__ , np.ndarray ):
_SCREAMING_SNAKE_CASE : Dict = np.asarray(snake_case__ , dtype=np.floataa )
elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_SCREAMING_SNAKE_CASE : Any = [np.asarray([raw_speech] ).T]
_SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_SCREAMING_SNAKE_CASE : List[str] = self.pad(
snake_case__ , padding=snake_case__ , max_length=max_length if max_length else self.n_samples , truncation=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_SCREAMING_SNAKE_CASE : Tuple = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_SCREAMING_SNAKE_CASE : str = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_SCREAMING_SNAKE_CASE : List[Any] = [self._np_extract_fbank_features(snake_case__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , snake_case__ ):
_SCREAMING_SNAKE_CASE : Any = [np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_features]
else:
_SCREAMING_SNAKE_CASE : int = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_SCREAMING_SNAKE_CASE : Dict = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = padded_inputs.convert_to_tensors(snake_case__ )
return padded_inputs
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
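# --- Illustrative sketch (not part of the original file): feeding raw audio
# through the upstream extractor this file mirrors. With the defaults above,
# any input is padded/truncated to the 30 s window, i.e. 3000 mel frames.
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor()               # 80 mel bins, 16 kHz, 30 s chunks
audio = np.zeros(16_000, dtype=np.float32)   # one second of silence
feats = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
assert feats.shape == (1, 80, 3000)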
| 295 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
A__ = """naver-clova-ix/donut-base-finetuned-docvqa"""
A__ = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
A__ = """document_qa"""
A__ = AutoProcessor
A__ = VisionEncoderDecoderModel
A__ = ["""image""", """text"""]
A__ = ["""text"""]
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*snake_case__ , **snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
_SCREAMING_SNAKE_CASE : Optional[int] = task_prompt.replace("{user_input}" , snake_case__ )
_SCREAMING_SNAKE_CASE : Tuple = self.pre_processor.tokenizer(
snake_case__ , add_special_tokens=snake_case__ , return_tensors="pt" ).input_ids
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.pre_processor(snake_case__ , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case__ , ).sequences
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
        sequence = self.pre_processor.batch_decode(snake_case__ )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        sequence = re.sub(r"<.*?>" , "" , sequence , count=1 ).strip() # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
return sequence["answer"]
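# --- Illustrative sketch (not part of the original file): loading the tool above
# through the agents entry point; "invoice.png" and the question are placeholders,
# and the "document-question-answering" task id is an assumption from the docs.
from PIL import Image
from transformers import load_tool

document = Image.open("invoice.png")
doc_qa = load_tool("document-question-answering")
print(doc_qa(document=document, question="What is the invoice total?"))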
| 295 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ :Union[str, Any] = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ :Any = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ :Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "first_exhausted" ,):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase ,(Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
elif not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,stopping_strategy=_lowerCAmelCase )
else:
return _interleave_iterable_datasets(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,stopping_strategy=_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,):
'''simple docstring'''
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase ,(Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
elif not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,axis=_lowerCAmelCase )
else:
return _concatenate_iterable_datasets(_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,axis=_lowerCAmelCase )
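# --- Illustrative sketch (not part of the original file): the public entry point
# that dispatches to the helpers above. Without sampling probabilities the sources
# are simply alternated until the first one is exhausted.
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
print(mixed["x"])  # [0, 10, 1, 11, 2, 12]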
| 569 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase( lowercase__ ):
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def snake_case_ ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def snake_case_ ( self ):
__lowerCamelCase : Any = self.get_config()
__lowerCamelCase : Dict = 300
return config
def snake_case_ ( self , __a ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
        model = DebertaModel(config=__a )
        model.to(__a )
        model.eval()
        sequence_output = model(__a , attention_mask=__a , token_type_ids=__a )[0]
        sequence_output = model(__a , token_type_ids=__a )[0]
        sequence_output = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
        model = DebertaForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        result = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(__a )
        model.to(__a )
        model.eval()
        result = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        result = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
        model = DebertaForQuestionAnswering(config=__a )
        model.to(__a )
        model.eval()
        result = model(
            __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ):
__lowerCamelCase : Dict = self.prepare_config_and_inputs()
        (
            config ,
            input_ids ,
            token_type_ids ,
            input_mask ,
            sequence_labels ,
            token_labels ,
            choice_labels ,
        ) = config_and_inputs
__lowerCamelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
__a : Tuple = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : int = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : int = True
__a : Optional[Any] = False
__a : Tuple = False
__a : Optional[Any] = False
__a : Tuple = False
def snake_case_ ( self ):
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=__a , hidden_size=37 )
def snake_case_ ( self ):
self.config_tester.run_common_tests()
def snake_case_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
def snake_case_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
def snake_case_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
def snake_case_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
def snake_case_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
@slow
def snake_case_ ( self ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(__a )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def snake_case_ ( self ):
pass
@slow
def snake_case_ ( self ):
        model = DebertaModel.from_pretrained('microsoft/deberta-base' )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
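# --- Note (not part of the original file): transformers gates @slow tests such as
# the integration check above behind an environment variable; the path and -k
# pattern below are illustrative, not the exact upstream invocation.
#   RUN_SLOW=1 python -m pytest tests/models/deberta/test_modeling_deberta.py -k integration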
| 263 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
a_ : Tuple = logging.get_logger(__name__)
# General docstring
a_ : List[str] = '''PoolFormerConfig'''
# Base docstring
a_ : Optional[Any] = '''sail/poolformer_s12'''
a_ : List[Any] = [1, 5_12, 7, 7]
# Image classification docstring
a_ : Any = '''sail/poolformer_s12'''
a_ : Optional[int] = '''tabby, tabby cat'''
a_ : Optional[Any] = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCAmelCase ( A__: Optional[Any] , A__: float = 0.0 , A__: bool = False ) -> Tuple:
if drop_prob == 0.0 or not training:
return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_() # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
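# --- Illustrative check (not part of the original file): stochastic depth zeroes
# whole residual branches per sample and rescales survivors by 1/keep_prob, so the
# expectation is preserved. Self-contained re-statement of the helper above.
import torch

def drop_path_sketch(x, drop_prob, training=True):
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)   # per-sample mask, broadcast over the rest
    mask = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()
    return x.div(keep_prob) * mask

out = drop_path_sketch(torch.ones(10_000, 4), 0.3)
print(float((out[:, 0] != 0).float().mean()))  # ~0.7, the keep probability
print(float(out.mean()))                       # ~1.0, expectation preserved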
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a = None ):
super().__init__()
        self.drop_prob = drop_prob
def snake_case_ ( self , __a ):
return drop_path(__a , self.drop_prob , self.training )
def snake_case_ ( self ):
return "p={}".format(self.drop_prob )
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a , __a=None ):
super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
def snake_case_ ( self , __a ):
        embeddings = self.projection(__a )
        embeddings = self.norm(embeddings )
return embeddings
class PoolFormerGroupNorm(nn.GroupNorm ):
    '''simple docstring'''
    def __init__( self , num_channels , **kwargs ):
        super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling(nn.Module ):
    '''simple docstring'''
    def __init__( self , pool_size ):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ):
        return self.pool(hidden_states ) - hidden_states
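def _pooling_demo() -> None:
    # Illustrative sketch (not part of the original module): PoolFormer's token mixer is
    # average pooling with the identity subtracted, so a constant feature map is mapped
    # to exactly zero and the residual branch only contributes local differences.
    pool = PoolFormerPooling(3)
    x = torch.ones(1, 8, 7, 7)
    assert torch.allclose(pool(x), torch.zeros_like(x))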
class PoolFormerOutput(nn.Module ):
    '''simple docstring'''
    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward( self , hidden_states ):
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class PoolFormerLayer(nn.Module ):
    '''simple docstring'''
    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
    def forward( self , hidden_states ):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCamelCase : int = config
# stochastic depth decay rule
__lowerCamelCase : int = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__lowerCamelCase : List[str] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__lowerCamelCase : Optional[int] = nn.ModuleList(__a )
# Transformer blocks
__lowerCamelCase : Any = []
__lowerCamelCase : int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__lowerCamelCase : Optional[int] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__a ) )
__lowerCamelCase : str = nn.ModuleList(__a )
def snake_case_ ( self , __a , __a=False , __a=True ):
__lowerCamelCase : Union[str, Any] = () if output_hidden_states else None
__lowerCamelCase : int = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__lowerCamelCase , __lowerCamelCase : Any = layers
# Get patch embeddings from hidden_states
__lowerCamelCase : Any = embedding_layer(__a )
# Send the embeddings through the blocks
for _, blk in enumerate(__a ):
__lowerCamelCase : Optional[int] = blk(__a )
__lowerCamelCase : Tuple = layer_outputs[0]
if output_hidden_states:
__lowerCamelCase : Union[str, Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a )
class PoolFormerPreTrainedModel(PreTrainedModel ):
    '''simple docstring'''
    config_class = PoolFormerConfig
    base_model_prefix = 'poolformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
a_ : Union[str, Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a_ : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , lowercase__ , )
class PoolFormerModel(PoolFormerPreTrainedModel ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__(__a )
__lowerCamelCase : Optional[Any] = config
__lowerCamelCase : Any = PoolFormerEncoder(__a )
# Initialize weights and apply final processing
self.post_init()
def snake_case_ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_ ( self , __a = None , __a = None , __a = None , ):
__lowerCamelCase : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__lowerCamelCase : Any = self.encoder(
__a , output_hidden_states=__a , return_dict=__a , )
__lowerCamelCase : int = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )
    def forward( self , hidden_states ):
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , lowercase__ , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__(__a )
__lowerCamelCase : str = config.num_labels
__lowerCamelCase : Optional[Any] = PoolFormerModel(__a )
# Final norm
__lowerCamelCase : str = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__lowerCamelCase : Optional[Any] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_ ( self , __a = None , __a = None , __a = None , __a = None , ):
__lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Tuple = self.poolformer(
__a , output_hidden_states=__a , return_dict=__a , )
__lowerCamelCase : int = outputs[0]
__lowerCamelCase : Optional[int] = self.classifier(self.norm(__a ).mean([-2, -1] ) )
__lowerCamelCase : Union[str, Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCamelCase : Any = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCamelCase : Any = 'single_label_classification'
else:
__lowerCamelCase : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__lowerCamelCase : int = MSELoss()
if self.num_labels == 1:
__lowerCamelCase : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowerCamelCase : Optional[Any] = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__lowerCamelCase : Tuple = CrossEntropyLoss()
__lowerCamelCase : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCamelCase : List[Any] = BCEWithLogitsLoss()
__lowerCamelCase : Optional[Any] = loss_fct(__a , __a )
if not return_dict:
__lowerCamelCase : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
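# Illustrative usage (a sketch, not part of the original file; the checkpoint name is
# taken from the docstring constants defined at the top of this module):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained('sail/poolformer_s12')
#     model = PoolFormerForImageClassification.from_pretrained('sail/poolformer_s12')
#     inputs = processor(images=image, return_tensors='pt')
#     predicted_label = model(**inputs).logits.argmax(-1).item()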
| 263 | 1 |
"""simple docstring"""
class RadixNode:
    """simple docstring"""
    def __init__( self , prefix: str = "" , is_leaf: bool = False ) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word: str ) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words: list[str] ) -> None:
        for word in words:
            self.insert(word )
    def insert( self , word: str ) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater than or equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word: str ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word: str ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height: int = 0 ) -> None:
        if self.prefix != "":
            print("-" * height , self.prefix , "  (leaf)" if self.is_leaf else "" )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie() -> bool:
    """simple docstring"""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True
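def _match_demo() -> None:
    # Worked example (a sketch, not part of the original file): match() splits a stored
    # prefix against an incoming word, which is what drives Cases 3 and 4 of insert().
    node = RadixNode(prefix="banana")
    assert node.match("band") == ("ban", "ana", "d")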
def pytests() -> None:
    """simple docstring"""
    assert test_trie()
def main() -> None:
    """simple docstring"""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words )
    print("Words:", words )
    print("Tree:" )
    root.print_tree()
if __name__ == "__main__":
main()
| 104 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict , old , new ) -> None:
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict , is_panoptic=False ):
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
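def _qkv_split_demo() -> None:
    # Illustrative sketch (not part of the original script): torch's MultiheadAttention
    # stores the fused input projection as a (3 * embed_dim, embed_dim) matrix with the
    # query, key and value blocks stacked along dim 0, which is exactly the slicing
    # performed in read_in_q_k_v above (embed_dim is 256 for this model).
    in_proj_weight = torch.randn(3 * 256, 256)
    q, k, v = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
    assert q.shape == k.shape == v.shape == (256, 256)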
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
__lowercase : Optional[int] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowercase : int = '''resnet101'''
if "dc5" in model_name:
__lowercase : Optional[Any] = True
__lowercase : int = '''panoptic''' in model_name
if is_panoptic:
__lowercase : Optional[int] = 250
else:
__lowercase : List[Any] = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
# load image processor
__lowercase : Optional[Any] = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowercase : List[str] = ConditionalDetrImageProcessor(format=__lowerCAmelCase )
# prepare image
__lowercase : Union[str, Any] = prepare_img()
__lowercase : List[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' )
__lowercase : str = encoding['''pixel_values''']
logger.info(F'Converting model {model_name}...' )
# load original model from torch hub
__lowercase : Tuple = torch.hub.load('''DeppMeng/ConditionalDETR''' , __lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
__lowercase : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowercase : Tuple = '''conditional_detr.''' + src
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowercase : Any = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase , is_panoptic=__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowercase : Any = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__lowercase : Union[str, Any] = state_dict.pop(__lowerCAmelCase )
__lowercase : Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowercase : str = state_dict.pop(__lowerCAmelCase )
__lowercase : Tuple = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__lowercase : Any = state_dict.pop(__lowerCAmelCase )
__lowercase : List[str] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__lowercase : Any = state_dict.pop(__lowerCAmelCase )
__lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
__lowercase : Union[str, Any] = ConditionalDetrForSegmentation(__lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
model.push_to_hub(repo_id=__lowerCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__lowercase : Optional[Any] = conditional_detr(__lowerCAmelCase )
__lowercase : Dict = model(__lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : str = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
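# Example invocation (illustrative; the dump folder is an arbitrary placeholder and the
# file name assumes the script is saved under its usual transformers name):
#
#     python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50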
| 509 | 0 |
class Graph:
    def __init__( self ):
        '''simple docstring'''
        self.vertex = {}
    def print_graph( self ):
        '''simple docstring'''
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex , to_vertex ):
        '''simple docstring'''
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ):
        '''simple docstring'''
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex , visited ):
        '''simple docstring'''
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 708 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **kwargs )
    def get_chinese_input_output_texts( self ):
        '''simple docstring'''
        input_text = '''永和服装饰品有限公司,今天天气非常好'''
        output_text = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
        return input_text, output_text
    def test_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
    def test_rust_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
| 622 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op , got_ver , want_ver , requirement , pkg , hint ):
    '''simple docstring'''
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''')
    if not ops[op](version.parse(got_ver) , version.parse(want_ver)):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''')
def require_version(requirement: str , hint: Optional[str] = None) -> None:
    '''simple docstring'''
    hint = F'''\n{hint}''' if hint is not None else ''
    # non-versioned check
    if re.match(r'^[\w_\-\d]+$' , requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
                F''' got {requirement}''')
        pkg, want_full = match[0]
        want_range = want_full.split(',') # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)' , w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
                    F''' but got {requirement}''')
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys())}, but got {op}''')
    # special case
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''')
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint)
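def _require_version_demo() -> None:
    # Illustrative usage (a sketch, not part of the original module): a requirement may
    # be a bare package name, a single pinned clause, or several comma-separated clauses.
    require_version('packaging')             # existence check only
    require_version('packaging>=20.0')       # one version clause
    require_version('python>=3.7,<4.0')      # special-cased interpreter check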
def require_version_core(requirement ):
    '''simple docstring'''
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(requirement , hint)
| 125 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : str ,UpperCamelCase : Any ,UpperCamelCase : int ) -> Dict:
super().__init__()
self.register_modules(unet=UpperCamelCase ,scheduler=UpperCamelCase )
@torch.no_grad()
def __call__( self : Any ,UpperCamelCase : int = 1 ,UpperCamelCase : int = 100 ,UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,UpperCamelCase : Optional[float] = None ,UpperCamelCase : bool = True ,) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
_lowercase : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
_lowercase : List[str] = audio_length_in_s * self.unet.config.sample_rate
_lowercase : Optional[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
_lowercase : Optional[int] = int(UpperCamelCase )
if sample_size % down_scale_factor != 0:
_lowercase : Dict = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
_lowercase : Optional[Any] = int(UpperCamelCase )
_lowercase : List[Any] = next(iter(self.unet.parameters() ) ).dtype
_lowercase : Optional[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(UpperCamelCase ,UpperCamelCase ) and len(UpperCamelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_lowercase : int = randn_tensor(UpperCamelCase ,generator=UpperCamelCase ,device=self.device ,dtype=UpperCamelCase )
# set step values
self.scheduler.set_timesteps(UpperCamelCase ,device=audio.device )
_lowercase : Optional[Any] = self.scheduler.timesteps.to(UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowercase : str = self.unet(UpperCamelCase ,UpperCamelCase ).sample
# 2. compute previous image: x_t -> t_t-1
_lowercase : Dict = self.scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample
_lowercase : Optional[int] = audio.clamp(-1 ,1 ).float().cpu().numpy()
_lowercase : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
        return AudioPipelineOutput(audios=UpperCamelCase )
| 125 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
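# Example invocation (illustrative; the script and argument names are placeholders):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# Everything after the training script path is forwarded to that script unchanged, with
# `--tpu_num_cores` appended so the script can see how many cores were requested.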
if __name__ == "__main__":
    main()
| 713 |
'''simple docstring'''
def odd_even_sort(input_list: list ) -> list:
    """simple docstring"""
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
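def _odd_even_sort_demo() -> None:
    # Illustrative sketch (not part of the original script): odd-even (brick) sort keeps
    # alternating compare-exchange passes over even and odd index pairs until a full
    # sweep makes no swap; like bubble sort it is O(n^2), but each phase touches
    # disjoint pairs and therefore parallelizes cleanly.
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert odd_even_sort([]) == []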
if __name__ == "__main__":
print('''Enter list to be sorted''')
__A = [int(x) for x in input().split()]
# inputing elements of the list in one line
__A = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list) | 61 | 0 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = 1
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 1, 0]
snake_case__ = len(_a )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=_a )
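# Illustrative usage of custom timesteps (a sketch, not part of the original tests; the
# config values mirror get_scheduler_config above):
#
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#     scheduler.set_timesteps(timesteps=[106, 0])      # explicit schedule, strictly descending
#     scheduler.set_timesteps(num_inference_steps=10)  # or derive one from a step count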
| 33 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str ) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()
def hackernews_top_stories(max_stories: int = 10 ) -> list[dict]:
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE ={
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 477 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=0 ) -> List[str]:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
lowercase_ : Any = torch.manual_seed(__UpperCamelCase )
else:
lowercase_ : int = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowercase_ : Optional[int] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowercase_ : List[str] = floats_tensor((1, 3, 16, 16) ,rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowercase_ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda' ,reason='float16 requires CUDA' )
    def test_save_load_float16( self ) -> None:
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1e-1 )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self._test_save_load_local()
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 477 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ : Tuple = 16
a__ : Any = 32
def _lowerCAmelCase ( A__ , A__ = 16 ):
lowercase__ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase__ = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
lowercase__ = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a__ : Optional[Any] = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
    for epoch in range(num_epochs):
model.train()
        for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
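            # Roughly, as a sketch of the mechanics (see the Accelerate docs for the definitive
            # behavior): inside `accumulate`, Accelerate scales the loss by
            # 1/gradient_accumulation_steps and skips the gradient all-reduce on every batch
            # except the one that completes an accumulation window, so the optimizer step below
            # only takes effect at window boundaries.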
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be run before gradients are accumulated.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 622 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"
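# For the binary case the coefficient reduces to (comment added for clarity):
#   MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN))
# which is exactly the Pearson correlation between the predicted and true binary labels.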
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
    def _compute(self, predictions, references, sample_weight=None):
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
}
| 622 | 1 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
# Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
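        # Comment added for clarity: with B = alphabet_size, M = modulus and window length
        # m = p_len, the hash of a window is sum(ord(s[k]) * B**(m-1-k)) mod M, and
        # modulus_power holds B**(m-1) mod M. Dropping the leftmost character and
        # appending one on the right is therefore
        #   new_hash = ((old_hash - ord(old_char) * modulus_power) * B + ord(new_char)) mod M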
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text_a = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_b = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 718 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
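    # Note (added): the name presumably encodes the architecture — 4 layers, 8 heads,
    # hidden size 768 and unit/embedding size 1024 — matching the values above.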
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=False, output_all_encodings=False, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu'), layer_norm_eps=predefined_args.get('layer_norm_eps', None), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir(), 'models')
    vocab = _load_vocab(vocab_name, None, data_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder, len(vocab), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=False, use_decoder=False, )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma')
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias')
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight')
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias')
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight')
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias')
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight')
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias')
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight')
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta')
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma')
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias')
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight')
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias')
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight')
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta')
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma')
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids_pt = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids_pt)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print('✔️ Both models output the same tensors')
    else:
        print('❌ Both models do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
 | 338 | 0 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `'warn'`.\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'recall': array([1., 0., 0.])}\n"
_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 373 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
    def __len__(self):
        return self.length
    def __getitem__(self, i):
return {"x": self.x[i], "y": self.y[i]}
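# Illustrative usage (added; not part of the original test utilities): the dataset yields
# noisy samples of y = a*x + b, so a linear model fit on it should recover a and b.
#   loader = DataLoader(RegressionDataset(length=96, seed=42), batch_size=16)
#   batch = next(iter(loader))  # {'x': tensor(...), 'y': tensor(...)}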
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'])

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 690 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
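    # Illustrative usage (added): the defaults above describe a small model; real checkpoints
    # are normally loaded instead, e.g. (assuming network access)
    #   config = WhisperConfig()                                    # small default config
    #   config = WhisperConfig.from_pretrained("openai/whisper-base")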
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
 | 710 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
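    # Illustrative usage (added; the checkpoint name is an example, not mandated by this file):
    #   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    #   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")
    # which returns the text tokens plus pixel_values/pixel_mask in a single BatchEncoding.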
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
| 546 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
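# Note (added for clarity): _LazyAutoMapping only stores config-name -> class-name strings;
# the actual Flax model class is imported the first time a given config class is looked up,
# which keeps `import transformers` cheap even with this many architectures registered.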
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
 | 6 |
from torch import nn
def get_activation(act_fn: str):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''')
 | 6 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
'''simple docstring'''
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """simple docstring"""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""", BERT_START_DOCSTRING, )
class BertModelWithPabee(BertModel):
'''simple docstring'''
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        """simple docstring"""
        self.regression_threshold = threshold
    def set_patience(self, patience):
        """simple docstring"""
        self.patience = patience
    def reset_stats(self):
        """simple docstring"""
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats(self):
        """simple docstring"""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
            f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False, ):
        """simple docstring"""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
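    # Added note on the early-exit rule implemented above (PABEE, "BERT Loses Patience"):
    # after each layer an internal classifier makes a prediction; a counter grows while
    # consecutive layers agree (or, for regression, differ by less than the threshold) and
    # resets otherwise. Inference stops as soon as the counter reaches `self.patience`,
    # so easy inputs exit after only a few layers.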
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """, BERT_START_DOCSTRING, )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ):
        """simple docstring"""
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 76 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
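# --- Consumption sketch (illustration only) ---
# Concrete test classes mix this in and supply the two class attributes; the
# extractor class and kwargs below are placeholder assumptions.
#
# class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}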
| 76 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # Heun needs two model evaluations per step, so interior entries are duplicated
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
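# --- Usage sketch (illustration only; not part of the original file) ---
# Heun is a two-evaluation predictor/corrector method, which is why
# `set_timesteps` duplicates the interior sigmas/timesteps above: every
# denoising step consumes two (timestep, sigma) entries. The zero "denoiser"
# below is a stand-in assumption for a real UNet call.
#
# scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(num_inference_steps=25, device="cpu")
# sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample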
| 78 |
'''simple docstring'''
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # pad height and width up to the next multiple of `size` with symmetric padding
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 78 | 1 |
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
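# --- Examples (illustration only) ---
# to_pascal_case("one two 31235three4four")        -> "OneTwo31235three4four"
# to_snake_case("one two 31235three4four", True)   -> "ONE_TWO_31235THREE4FOUR"
# to_kebab_case("one two 31235three4four", False)  -> "one-two-31235three4four"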
| 114 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
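# --- Usage sketch (illustration only) ---
# With the defaults above, the derived channel width after the last stage is
# hidden_size = 96 * 2 ** (4 - 1) = 768:
#
# config = DonutSwinConfig()
# assert config.hidden_size == 768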
| 114 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 98 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
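# --- Usage sketch (illustration only; the file path is a placeholder) ---
# reader = TextDatasetReader("corpus.txt", split="train")
# ds = reader.read()   # yields a dataset with a single "text" column, one row per line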
| 639 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 702 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg_error)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 89 | 0 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )

        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
__snake_case = NestedDataStructure(SCREAMING_SNAKE_CASE ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 163 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163 | 1 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 605 |
'''simple docstring'''
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
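# Example (illustration only): with at most 100 tiles there are 41 distinct
# hollow square laminae, i.e. solution(100) == 41.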
if __name__ == "__main__":
print(f'{solution() = }')
| 605 | 1 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve |Z| = sqrt(R**2 + X**2) for whichever quantity is passed as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
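# Example (illustration only): a 3-4-5 triangle of R, X and |Z|.
# electrical_impedance(3, 4, 0) -> {"impedance": 5.0}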
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 30 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
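# Minimal check (illustration only): for the block matrix M = [[A, B], [B.T, C]]
# the identity det(M) = det(A) * det(M/A) holds, where M/A is the Schur
# complement computed above; the unit tests below verify exactly this.
#
# schur_complement(np.eye(2), np.zeros((2, 1)), np.ones((1, 1)))  # -> [[1.]]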
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
    unittest.main()
| 30 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor (explicit Euler), then trapezoidal corrector
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
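# Example (illustration only): integrating y' = y from x = 0 with y(0) = 1 up
# to x = 1 approaches e ≈ 2.7183:
#
# euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]  # ~2.714 with h = 0.1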
if __name__ == "__main__":
import doctest
doctest.testmod()
| 511 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index_1: int, index_2: int, direction: int) -> None:
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1], array[index_2] = array[index_2], array[index_1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
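# Note: a bitonic sorting network assumes the input length is a power of two;
# the interactive demo below does not validate this.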
if __name__ == "__main__":
lowercase__ =input('Enter numbers separated by a comma:\n').strip()
lowercase__ =[int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 511 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")

    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 387 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a TVLT audio feature extractor (log-mel spectrogram patches)."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 457 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
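
# For example, digit_factorial_sum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145,
# which is why 145 maps to itself in a digit-factorial chain.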
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''') | 711 | """simple docstring"""
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
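
# For example, is_palindrome(121) is True and is_palindrome(123) is False; any
# negative number returns False since the sign cannot be mirrored.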
if __name__ == "__main__":
import doctest
doctest.testmod() | 227 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
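
# Kadane's algorithm: ``curr_sum`` tracks the best subarray sum ending at the
# current element; whenever extending the running subarray is worse than
# restarting, the running sum is reset, giving a single O(n) pass.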
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the
        image processor, given a ``shortest_edge`` size and a ``size_divisor``.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
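
# The ``accelerator.accumulate(model)`` context above makes ``optimizer.step()``
# and gradient synchronization no-ops until ``gradient_accumulation_steps``
# micro-batches have been processed, so the effective batch size is
# batch_size * gradient_accumulation_steps without extra memory cost.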
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 15 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = GPTaTokenizer
lowerCAmelCase = GPTaTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = {'''add_prefix_space''': True}
lowerCAmelCase = False
def a__ ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
UpperCAmelCase_ : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase_ : Optional[int] = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : Optional[int] = '''lower newer'''
UpperCAmelCase_ : List[str] = '''lower newer'''
return input_text, output_text
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[str] = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : int = '''lower newer'''
UpperCAmelCase_ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''lower newer'''
# Testing tokenization
UpperCAmelCase_ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
UpperCAmelCase_ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
UpperCAmelCase_ : str = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing the unknown token
UpperCAmelCase_ : Dict = tokens + [rust_tokenizer.unk_token]
UpperCAmelCase_ : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def a__ ( self ,_SCREAMING_SNAKE_CASE=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
# Simple input
UpperCAmelCase_ : str = '''This is a simple input'''
UpperCAmelCase_ : str = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : int = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,)
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,)
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' )
# Simple input
UpperCAmelCase_ : str = '''This is a simple input'''
UpperCAmelCase_ : Tuple = ['''This is a simple input looooooooong''', '''This is a simple input''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Optional[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
UpperCAmelCase_ : Union[str, Any] = tokenizer.pad_token_id
UpperCAmelCase_ : Dict = tokenizer(_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,max_length=30 ,return_tensors='''np''' )
UpperCAmelCase_ : int = tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncate=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Tuple = tokenizer(*_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,max_length=60 ,return_tensors='''np''' )
UpperCAmelCase_ : List[Any] = tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncate=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def a__ ( self ) -> Any:
UpperCAmelCase_ : List[Any] = '''$$$'''
UpperCAmelCase_ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=_SCREAMING_SNAKE_CASE ,add_bos_token=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = '''This is a simple input'''
UpperCAmelCase_ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : List[Any] = tokenizer.bos_token_id
UpperCAmelCase_ : List[str] = tokenizer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] ,_SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCAmelCase_ : Tuple = tokenizer.decode(out_s.input_ids )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,_SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def a__ ( self ) -> str:
pass
def a__ ( self ) -> int:
# TODO: change to self.get_tokenizers() when the fast version is implemented
UpperCAmelCase_ : Tuple = [self.get_tokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ,add_bos_token=_SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : List[Any] = '''Encode this.'''
UpperCAmelCase_ : List[Any] = '''This one too please.'''
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = tokenizer.encode_plus(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_special_tokens_mask=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : int = encoded_sequence_dict['''input_ids''']
UpperCAmelCase_ : Union[str, Any] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_SCREAMING_SNAKE_CASE )
]
UpperCAmelCase_ : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> List[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = '''A photo of a cat'''
UpperCAmelCase_ : Any = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
self.assertEqual(_SCREAMING_SNAKE_CASE ,[2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('''test_opt''' )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''./test_opt''' )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
self.assertEqual(_SCREAMING_SNAKE_CASE ,[2, 250, 1_345, 9, 10, 4_758] )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,use_slow=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''A photo of a cat'''
UpperCAmelCase_ : int = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
# Same as above
self.assertEqual(_SCREAMING_SNAKE_CASE ,[2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = '''bos'''
UpperCAmelCase_ : Dict = tokenizer.get_vocab()['''bos''']
UpperCAmelCase_ : Optional[int] = '''A photo of a cat'''
UpperCAmelCase_ : Any = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
# We changed the bos token
self.assertEqual(_SCREAMING_SNAKE_CASE ,[31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('''./tok''' )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
UpperCAmelCase_ : Tuple = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
self.assertEqual(_SCREAMING_SNAKE_CASE ,[31_957, 250, 1_345, 9, 10, 4_758] ) | 30 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
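
# ``_LazyModule`` replaces this module in ``sys.modules`` so the framework-specific
# imports above only run when one of the exported names is first accessed, keeping
# ``import`` of the package fast even with every backend installed.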
| 365 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    Build the example tree::

            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
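
# ``level_order`` is a standard breadth-first traversal: the deque holds the
# current frontier, so values come out level by level from left to right.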
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    # ``flag`` toggles the direction on every level of the tree.
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 392 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Normalize to cubic meters via ``from_``, then scale to the target via ``to``.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
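
# Worked example: volume_conversion(4, "cubicmeter", "cup")
# = 4 * 1 (cubicmeter.from_) * 4226.75 (cup.to) = 16907.0 cups.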
if __name__ == "__main__":
import doctest
doctest.testmod()
| 392 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__magic_name__: Optional[Any] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 324 |
from collections import deque
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = len(_A )
__magic_name__ : Optional[int] = deque()
__magic_name__ : Tuple = [False for _ in range(_A )]
__magic_name__ : Optional[Any] = [-1 for _ in range(_A )]
__magic_name__ : Optional[int] = index_of[:]
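    # Tarjan's algorithm: index_of[v] is the DFS discovery order of v and
    # lowlink_of[v] is the smallest index reachable from v's DFS subtree;
    # a vertex whose lowlink equals its own index is the root of an SCC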
def strong_connect(_A, _A, _A ):
__magic_name__ : Dict = index # the number when this node is seen
__magic_name__ : Optional[Any] = index # lowest rank node reachable from here
index += 1
stack.append(_A )
__magic_name__ : str = True
for w in g[v]:
if index_of[w] == -1:
__magic_name__ : Union[str, Any] = strong_connect(_A, _A, _A )
__magic_name__ : Optional[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__magic_name__ : List[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
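        # v is the root of a strongly connected component:
        # pop the stack down to v to collect the whole component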
if lowlink_of[v] == index_of[v]:
__magic_name__ : Optional[Any] = []
__magic_name__ : List[Any] = stack.pop()
__magic_name__ : List[Any] = False
component.append(_A )
while w != v:
__magic_name__ : List[Any] = stack.pop()
__magic_name__ : Optional[int] = False
component.append(_A )
components.append(_A )
return index
__magic_name__ : Tuple = []
for v in range(_A ):
if index_of[v] == -1:
strong_connect(_A, 0, _A )
return components
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Tuple = [[] for _ in range(_A )]
for u, v in edges:
g[u].append(_A )
return g
if __name__ == "__main__":
# Test
__magic_name__: str = 7
__magic_name__: int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__magic_name__: Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__magic_name__: str = [(u, v) for u, v in zip(source, target)]
__magic_name__: List[str] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 324 | 1 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = [0 for i in range(len(_UpperCamelCase ) )]
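    # z_result[i] = length of the longest substring starting at i
    # that is also a prefix of the whole string (z_result[0] stays 0 here)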
# initialize interval's left pointer and right pointer
__lowerCAmelCase : List[str] = 0, 0
for i in range(1 , len(_UpperCamelCase ) ):
# case when current index is inside the interval
if i <= right_pointer:
__lowerCAmelCase : Any = min(right_pointer - i + 1 , z_result[i - left_pointer] )
__lowerCAmelCase : str = min_edge
while go_next(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
__lowerCAmelCase : int = i, i + z_result[i] - 1
return z_result
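# for reference, assuming standard Z-array semantics: z_function("abacaba") -> [0, 0, 1, 0, 3, 0, 1]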
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return i + z_result[i] < len(_UpperCamelCase ) and s[z_result[i]] == s[i + z_result[i]]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__lowerCAmelCase : str = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_UpperCamelCase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 710 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __lowerCAmelCase (_UpperCamelCase ):
# initialize config
if "resnet-50" in model_name:
__lowerCAmelCase : int = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
__lowerCAmelCase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet50 or resnet101' )
__lowerCAmelCase : Union[str, Any] = DetrConfig(use_timm_backbone=_UpperCamelCase , backbone_config=_UpperCamelCase )
# set label attributes
__lowerCAmelCase : Optional[int] = 'panoptic' in model_name
if is_panoptic:
__lowerCAmelCase : Dict = 250
else:
__lowerCAmelCase : List[str] = 91
__lowerCAmelCase : Tuple = 'huggingface/label-files'
__lowerCAmelCase : str = 'coco-detection-id2label.json'
__lowerCAmelCase : Optional[int] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
__lowerCAmelCase : List[Any] = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase : List[Any] = idalabel
__lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __lowerCAmelCase (_UpperCamelCase ):
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : Tuple = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Dict = val
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase : Tuple = ''
if is_panoptic:
__lowerCAmelCase : Union[str, Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCAmelCase : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__lowerCAmelCase : List[Any] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
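        # the fused in_proj matrix has shape (3 * hidden_size, hidden_size) with hidden_size = 256,
        # so rows [0:256] are the query, [256:512] the key and [-256:] the value projection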
__lowerCAmelCase : int = in_proj_weight[:256, :]
__lowerCAmelCase : Tuple = in_proj_bias[:256]
__lowerCAmelCase : Dict = in_proj_weight[256:512, :]
__lowerCAmelCase : Optional[int] = in_proj_bias[256:512]
__lowerCAmelCase : int = in_proj_weight[-256:, :]
__lowerCAmelCase : int = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__lowerCAmelCase : int = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__lowerCAmelCase : List[str] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase : List[str] = in_proj_weight[:256, :]
__lowerCAmelCase : int = in_proj_bias[:256]
__lowerCAmelCase : Any = in_proj_weight[256:512, :]
__lowerCAmelCase : Any = in_proj_bias[256:512]
__lowerCAmelCase : Any = in_proj_weight[-256:, :]
__lowerCAmelCase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__lowerCAmelCase : Union[str, Any] = state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
__lowerCAmelCase : Optional[Any] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__lowerCAmelCase : List[Any] = in_proj_weight_cross_attn[:256, :]
__lowerCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[:256]
__lowerCAmelCase : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
__lowerCAmelCase : Any = in_proj_bias_cross_attn[256:512]
__lowerCAmelCase : str = in_proj_weight_cross_attn[-256:, :]
__lowerCAmelCase : Optional[int] = in_proj_bias_cross_attn[-256:]
def __lowerCAmelCase ():
__lowerCAmelCase : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase : Any = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=False ):
__lowerCAmelCase , __lowerCAmelCase : int = get_detr_config(_UpperCamelCase )
# load original model from torch hub
__lowerCAmelCase : List[str] = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F"Converting model {model_name}..." )
__lowerCAmelCase : List[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=_UpperCamelCase ).eval()
__lowerCAmelCase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_UpperCamelCase ):
if is_panoptic:
__lowerCAmelCase : List[str] = 'detr.' + src
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_UpperCamelCase , is_panoptic=_UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCAmelCase : Optional[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
__lowerCAmelCase : Optional[Any] = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCAmelCase : List[Any] = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
__lowerCAmelCase : Optional[Any] = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__lowerCAmelCase : Dict = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Optional[int] = val
# finally, create HuggingFace model and load state dict
__lowerCAmelCase : Tuple = DetrForSegmentation(_UpperCamelCase ) if is_panoptic else DetrForObjectDetection(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# verify our conversion on an image
__lowerCAmelCase : Optional[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
__lowerCAmelCase : List[str] = DetrImageProcessor(format=_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = processor(images=prepare_img() , return_tensors='pt' )
__lowerCAmelCase : Optional[int] = encoding['pixel_values']
__lowerCAmelCase : Optional[Any] = detr(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = model(_UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
lowerCamelCase__ = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 549 | 0 |
"""simple docstring"""
import sys
def _UpperCamelCase ( A ):
UpperCamelCase_ =len(A )
UpperCamelCase_ =[[0 for x in range(A )] for x in range(A )]
UpperCamelCase_ =[[0 for x in range(A )] for x in range(A )]
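    # DP over chain lengths: matrix[a][b] = min over split point c of
    # matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]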
for chain_length in range(2 , A ):
for a in range(1 , n - chain_length + 1 ):
UpperCamelCase_ =a + chain_length - 1
UpperCamelCase_ =sys.maxsize
for c in range(A , A ):
UpperCamelCase_ =(
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCamelCase_ =cost
UpperCamelCase_ =c
return matrix, sol
def _UpperCamelCase ( A , A , A ):
if i == j:
print("A" + str(A ) , end=" " )
else:
print("(" , end=" " )
        print_optimal_solution(A , A , optimal_solution[i][j] )
        print_optimal_solution(A , optimal_solution[i][j] + 1 , A )
print(")" , end=" " )
def _UpperCamelCase ( ):
UpperCamelCase_ =[30, 35, 15, 5, 10, 20, 25]
UpperCamelCase_ =len(A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCamelCase_ , UpperCamelCase_ =matrix_chain_order(A )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optimal_solution(A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 391 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
set_seed(770)
A_ = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
A_ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
A_ = os.path.dirname(os.path.abspath(__file__))
A_ = os.path.join(os.path.expanduser("~"), ".cache")
A_ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _UpperCamelCase ( A , A=False ):
UpperCamelCase_ =model_type
if use_small:
key += "_small"
return os.path.join(A , REMOTE_MODEL_PATHS[key]["file_name"] )
def _UpperCamelCase ( A , A ):
os.makedirs(A , exist_ok=A )
hf_hub_download(repo_id=A , filename=A , local_dir=A )
def _UpperCamelCase ( A , A , A=False , A="text" ):
if model_type == "text":
UpperCamelCase_ =BarkSemanticModel
UpperCamelCase_ =BarkSemanticConfig
UpperCamelCase_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCamelCase_ =BarkCoarseModel
UpperCamelCase_ =BarkCoarseConfig
UpperCamelCase_ =BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCamelCase_ =BarkFineModel
UpperCamelCase_ =BarkFineConfig
UpperCamelCase_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCamelCase_ =f"""{model_type}_small""" if use_small else model_type
UpperCamelCase_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(A ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
UpperCamelCase_ =torch.load(A , map_location=A )
# this is a hack
UpperCamelCase_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
UpperCamelCase_ =model_args["vocab_size"]
UpperCamelCase_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCamelCase_ =model_args.pop("n_head" )
UpperCamelCase_ =model_args.pop("n_embd" )
UpperCamelCase_ =model_args.pop("n_layer" )
UpperCamelCase_ =ConfigClass(**checkpoint["model_args"] )
UpperCamelCase_ =ModelClass(config=A )
UpperCamelCase_ =GenerationConfigClass()
UpperCamelCase_ =model_generation_config
UpperCamelCase_ =checkpoint["model"]
# fixup checkpoint
UpperCamelCase_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(A ):
# replace part of the key with corresponding layer name in HF implementation
UpperCamelCase_ =k[len(A ) :]
for old_layer_name in new_layer_name_dict:
UpperCamelCase_ =new_k.replace(A , new_layer_name_dict[old_layer_name] )
UpperCamelCase_ =state_dict.pop(A )
UpperCamelCase_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCamelCase_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
UpperCamelCase_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCamelCase_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(A ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(A ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(A , strict=A )
UpperCamelCase_ =model.num_parameters(exclude_embeddings=A )
UpperCamelCase_ =checkpoint["best_val_loss"].item()
logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss""" )
model.eval()
model.to(A )
del checkpoint, state_dict
return model
def _UpperCamelCase ( A , A=False , A="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCamelCase_ ="cpu" # do conversion on cpu
UpperCamelCase_ =_get_ckpt_path(A , use_small=A )
UpperCamelCase_ =_load_model(A , A , model_type=A , use_small=A )
# load bark initial model
UpperCamelCase_ =_bark_load_model(A , "cpu" , model_type=A , use_small=A )
if model_type == "text":
UpperCamelCase_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
UpperCamelCase_ =5
UpperCamelCase_ =10
if model_type in ["text", "coarse"]:
UpperCamelCase_ =torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
UpperCamelCase_ =bark_model(A )[0]
UpperCamelCase_ =model(A )
# take last logits
UpperCamelCase_ =output_new_model_total.logits[:, [-1], :]
else:
UpperCamelCase_ =3
UpperCamelCase_ =8
UpperCamelCase_ =torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCamelCase_ =model(A , A )
UpperCamelCase_ =bark_model(A , A )
UpperCamelCase_ =output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
def _UpperCamelCase ( A , A , A , A , A , A , ):
UpperCamelCase_ =os.path.join(A , A )
UpperCamelCase_ =BarkSemanticConfig.from_pretrained(os.path.join(A , "config.json" ) )
UpperCamelCase_ =BarkCoarseConfig.from_pretrained(os.path.join(A , "config.json" ) )
UpperCamelCase_ =BarkFineConfig.from_pretrained(os.path.join(A , "config.json" ) )
UpperCamelCase_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
UpperCamelCase_ =BarkSemanticModel.from_pretrained(A )
UpperCamelCase_ =BarkCoarseModel.from_pretrained(A )
UpperCamelCase_ =BarkFineModel.from_pretrained(A )
UpperCamelCase_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
UpperCamelCase_ =BarkConfig.from_sub_model_configs(
A , A , A , A )
UpperCamelCase_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCamelCase_ =BarkModel(A )
UpperCamelCase_ =semantic
UpperCamelCase_ =coarseAcoustic
UpperCamelCase_ =fineAcoustic
UpperCamelCase_ =codec
UpperCamelCase_ =bark_generation_config
Path(A ).mkdir(exist_ok=A )
bark.save_pretrained(A , repo_id=A , push_to_hub=A )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
A_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 391 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
snake_case_ = tuple[int, int]
class _lowercase :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
A : set[int] = vertices
A : dict[EdgeT, int] = {
(min(UpperCAmelCase__ ), max(UpperCAmelCase__ )): weight for edge, weight in edges.items()
}
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
A : str = weight
def snake_case ( self ):
A : Graph = Graph({min(self.vertices )} , {} )
A : EdgeT
A : int
A : EdgeT
A : int
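        # Prim's algorithm: grow the tree one edge at a time by always adding the
        # cheapest edge with exactly one endpoint inside the current subgraph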
while len(subgraph.vertices ) < len(self.vertices ):
A : List[Any] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
A : List[Any] = edge
A : int = weight
subgraph.add_edge(UpperCAmelCase__ , UpperCAmelCase__ )
return subgraph
def _lowerCamelCase( UpperCamelCase__ : str = "p107_network.txt" ) -> int:
A : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
A : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
A : dict[EdgeT, int] = {}
A : list[str]
A : int
A : int
with open(UpperCamelCase__ ) as f:
A : Tuple = f.read().strip().split('''\n''' )
A : Union[str, Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
            if adjacency_matrix[edgea][edgea] != "-":
                A : Tuple = int(adjacency_matrix[edgea][edgea] )
A : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
A : Graph = graph.prims_algorithm()
A : int = sum(graph.edges.values() )
A : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 700 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCamelCase( UpperCamelCase__ : Dict ) -> Union[str, Any]:
A : Optional[Any] = fname.split(os.path.sep )[-1]
return re.search(R'''^(.*)_\d+\.jpg$''' , UpperCamelCase__ ).groups()[0]
class _lowercase ( a ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
A : str = file_names
A : Optional[int] = image_transform
A : str = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self , _UpperCAmelCase ):
A : int = self.file_names[idx]
A : int = PIL.Image.open(_UpperCAmelCase )
A : str = raw_image.convert('''RGB''' )
if self.image_transform is not None:
A : Dict = self.image_transform(_UpperCAmelCase )
A : Tuple = extract_label(_UpperCAmelCase )
if self.label_to_id is not None:
A : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCamelCase( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ) -> Any:
# Initialize accelerator
if args.with_tracking:
A : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
A : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : List[str] = config['''lr''']
A : int = int(config['''num_epochs'''] )
A : List[str] = int(config['''seed'''] )
A : Any = int(config['''batch_size'''] )
A : List[str] = config['''image_size''']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A : List[str] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
A : List[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A : Optional[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A : Optional[Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A : Any = os.path.split(UpperCamelCase__ )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A : int = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
A : int = [extract_label(UpperCamelCase__ ) for fname in file_names]
A : str = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A : Dict = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A : Dict = np.random.permutation(len(UpperCamelCase__ ) )
A : str = int(0.8 * len(UpperCamelCase__ ) )
A : Tuple = random_perm[:cut]
A : List[Any] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A : Any = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A : Optional[Any] = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A : List[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A : Tuple = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : Union[str, Any] = create_model('''resnet50d''' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A : Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A : Union[str, Any] = False
for param in model.get_classifier().parameters():
A : Any = True
# We normalize the batches of images to be a bit faster.
A : Dict = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
A : str = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A : List[Any] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
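    # note: OneCycleLR's default div_factor is 25, so the schedule warms up from the Adam base lr (lr / 25) to max_lr = lr and anneals back down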
A : int = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A, A, A, A, A : Optional[int] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
A : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
A : str = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A : Optional[int] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A : Dict = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A : Optional[int] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A : Optional[int] = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
A : Tuple = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
A : Union[str, Any] = None
else:
A : int = int(training_difference.replace('''step_''' , '''''' ) )
A : str = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
A : int = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A : str = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A : int = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Optional[int] = (batch['''image'''] - mean) / std
A : int = model(UpperCamelCase__ )
A : List[Any] = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A : List[Any] = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
A : Optional[int] = 0
A : int = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Any = (batch['''image'''] - mean) / std
with torch.no_grad():
A : Union[str, Any] = model(UpperCamelCase__ )
A : Tuple = outputs.argmax(dim=-1 )
A, A : List[Any] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
A : Any = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A : str = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(UpperCamelCase__ ),
'''epoch''': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
A : Dict = F'''epoch_{epoch}'''
if args.output_dir is not None:
A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def _lowerCamelCase( ) -> int:
A : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=UpperCamelCase__ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
        '''--mixed_precision''' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=UpperCamelCase__ , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information.''' , )
A : Tuple = parser.parse_args()
A : List[str] = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 537 | 0 |
from __future__ import annotations
def a__ ( A__, A__ ):
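    # merge and sort both arrays; the median is the middle element for odd total
    # length, or the mean of the two middle elements for even total length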
SCREAMING_SNAKE_CASE_ : Any = sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = divmod(len(a__ ), 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ : Dict =[float(x) for x in input('Enter the elements of first array: ').split()]
lowerCAmelCase__ : int =[float(x) for x in input('Enter the elements of second array: ').split()]
print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 101 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowercase ( a__ : Optional[int] , a__ : Dict=False ) -> Tuple:
try:
_UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCamelCase = strtobool(a__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase = parse_flag_from_env("""RUN_SLOW""", default=False)
UpperCAmelCase = parse_flag_from_env("""RUN_REMOTE""", default=False)
UpperCAmelCase = parse_flag_from_env("""RUN_LOCAL""", default=True)
UpperCAmelCase = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
UpperCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
UpperCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
UpperCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
UpperCAmelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
UpperCAmelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
UpperCAmelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
UpperCAmelCase = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def lowercase ( a__ : Union[str, Any] ) -> str:
try:
import faiss # noqa
except ImportError:
_UpperCamelCase = unittest.skip('''test requires faiss''' )(a__ )
return test_case
def lowercase ( a__ : List[Any] ) -> str:
try:
import regex # noqa
except ImportError:
_UpperCamelCase = unittest.skip('''test requires regex''' )(a__ )
return test_case
def lowercase ( a__ : Any ) -> Union[str, Any]:
try:
import elasticsearch # noqa
except ImportError:
_UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(a__ )
return test_case
def lowercase ( a__ : List[Any] ) -> Optional[Any]:
try:
import sqlalchemy # noqa
except ImportError:
_UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(a__ )
return test_case
def lowercase ( a__ : Any ) -> Union[str, Any]:
if not config.TORCH_AVAILABLE:
_UpperCamelCase = unittest.skip('''test requires PyTorch''' )(a__ )
return test_case
def lowercase ( a__ : str ) -> Optional[Any]:
if not config.TF_AVAILABLE:
_UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(a__ )
return test_case
def lowercase ( a__ : Optional[int] ) -> str:
if not config.JAX_AVAILABLE:
_UpperCamelCase = unittest.skip('''test requires JAX''' )(a__ )
return test_case
def lowercase ( a__ : str ) -> int:
if not config.PIL_AVAILABLE:
_UpperCamelCase = unittest.skip('''test requires Pillow''' )(a__ )
return test_case
def lowercase ( a__ : Union[str, Any] ) -> Dict:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(a__ )
else:
return test_case
def lowercase ( a__ : List[Any] ) -> Optional[int]:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(a__ )
else:
return test_case
def lowercase ( a__ : str ) -> Tuple:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(a__ )
else:
return test_case
def lowercase ( a__ : List[Any] ) -> int:
def _require_spacy_model(a__ : Any ):
try:
import spacy # noqa F401
spacy.load(a__ )
except ImportError:
return unittest.skip('''test requires spacy''' )(a__ )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(a__ ) )(a__ )
else:
return test_case
return _require_spacy_model
def lowercase ( a__ : List[str] ) -> Dict:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(a__ )
else:
return test_case
def lowercase ( a__ : int ) -> Any:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(a__ )
else:
return test_case
def lowercase ( a__ : List[Any] ) -> Optional[int]:
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCamelCase = unittest.skip('''test is slow''' )(a__ )
return test_case
def lowercase ( a__ : List[Any] ) -> int:
if not _run_local_tests or _run_local_tests == 0:
_UpperCamelCase = unittest.skip('''test is local''' )(a__ )
return test_case
def lowercase ( a__ : Optional[int] ) -> List[str]:
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCamelCase = unittest.skip('''test is packaged''' )(a__ )
return test_case
def lowercase ( a__ : List[Any] ) -> int:
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCamelCase = unittest.skip('''test requires remote''' )(a__ )
return test_case
def lowercase ( *a__ : Dict ) -> List[Any]:
def decorate(cls : str ):
for name, fn in cls.__dict__.items():
if callable(a__ ) and name.startswith('''test''' ):
for decorator in decorators:
_UpperCamelCase = decorator(a__ )
setattr(cls , a__ , a__ )
return cls
return decorate
class UpperCAmelCase_ ( _lowercase):
pass
class UpperCAmelCase_ ( _lowercase):
snake_case__ = 0
snake_case__ = 1
snake_case__ = 2
@contextmanager
def lowercase ( a__ : List[Any]=OfflineSimulationMode.CONNECTION_FAILS , a__ : Dict=1e-16 ) -> Any:
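    # Simulate being offline in one of three ways: requests that fail, requests that hang
    # until they time out, or setting HF_DATASETS_OFFLINE=1 (see OfflineSimulationMode)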
_UpperCamelCase = requests.Session().request
def timeout_request(a__ : Dict , a__ : str , a__ : Optional[Any] , **a__ : Any ):
# Change the url to an invalid url so that the connection hangs
_UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
_UpperCamelCase = timeout
try:
return online_request(a__ , a__ , **a__ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCamelCase = url
_UpperCamelCase = e.args[0]
_UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' , F'''OfflineMock[{url}]''' ),)
_UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(a__ : Optional[int] , a__ : int , **a__ : Any ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=a__ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , a__ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , a__ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , a__ ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def lowercase ( *a__ : Optional[int] , **a__ : List[str] ) -> Optional[Any]:
_UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*a__ , **a__ ) as tmp_dir:
try:
os.chdir(a__ )
yield
finally:
os.chdir(a__ )
@contextmanager
def lowercase ( ) -> List[str]:
import gc
gc.collect()
_UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase ( ) -> List[Any]:
import gc
gc.collect()
_UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase ( a__ : Optional[Any] , a__ : Tuple ) -> List[Any]:
return deepcopy(a__ ).integers(0 , 100 , 10 ).tolist() == deepcopy(a__ ).integers(0 , 100 , 10 ).tolist()
def lowercase ( a__ : Optional[Any] ) -> Tuple:
import decorator
from requests.exceptions import HTTPError
def _wrapper(a__ : Union[str, Any] , *a__ : Tuple , **a__ : Any ):
try:
return func(*a__ , **a__ )
except HTTPError as err:
if str(a__ ).startswith('''500''' ) or str(a__ ).startswith('''502''' ):
pytest.xfail(str(a__ ) )
raise err
return decorator.decorator(_wrapper , a__ )
class UpperCAmelCase_ :
def __init__( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> Any:
_UpperCamelCase = returncode
_UpperCamelCase = stdout
_UpperCamelCase = stderr
async def lowercase ( a__ : int , a__ : Tuple ) -> int:
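    # forward each line of the subprocess stream to the callback until EOF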
while True:
_UpperCamelCase = await stream.readline()
if line:
callback(a__ )
else:
break
async def lowercase ( a__ : Tuple , a__ : int=None , a__ : int=None , a__ : Optional[Any]=None , a__ : Optional[int]=False , a__ : List[str]=False ) -> _RunOutput:
if echo:
print('''\nRunning: ''' , ''' '''.join(a__ ) )
_UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=a__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=a__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCamelCase = []
_UpperCamelCase = []
def tee(a__ : Optional[int] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : List[str]="" ):
_UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(a__ )
if not quiet:
print(a__ , a__ , file=a__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda a__ : tee(a__ , a__ , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda a__ : tee(a__ , a__ , sys.stderr , label='''stderr:''' ) ),
] , timeout=a__ , )
return _RunOutput(await p.wait() , a__ , a__ )
def lowercase ( a__ : Any , a__ : List[Any]=None , a__ : List[Any]=None , a__ : Tuple=180 , a__ : Tuple=False , a__ : Optional[int]=True ) -> _RunOutput:
_UpperCamelCase = asyncio.get_event_loop()
_UpperCamelCase = loop.run_until_complete(
_stream_subprocess(a__ , env=a__ , stdin=a__ , timeout=a__ , quiet=a__ , echo=a__ ) )
_UpperCamelCase = ''' '''.join(a__ )
if result.returncode > 0:
_UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
return result
def lowercase ( ) -> List[Any]:
_UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
_UpperCamelCase = re.sub(R'''^gw''' , '''''' , a__ , 0 , re.M )
return int(a__ )
def lowercase ( ) -> str:
_UpperCamelCase = 29500
_UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta
| 420 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ):
__lowercase : List[Any] = hf_hub_url(repo_id=lowerCAmelCase_ , path=lowerCAmelCase_ , revision=lowerCAmelCase_ )
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(lowerCAmelCase_ )}"
| 649 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
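# The dict below only declares the public API; _LazyModule at the bottom of the file defers the real imports until first attribute access.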
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 649 | 1 |
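# Consumer-side sketch of the lazy-module pattern above (import path assumed):
# the attribute is resolved on first access, so neither the PyTorch nor the
# TensorFlow Funnel submodules are imported until actually needed.
#
#     from transformers.models.funnel import FunnelConfig
#     config = FunnelConfig()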
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print("moving disk from", from_pole, "to", to_pole)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
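# Quick sanity check (not in the original script): a tower of height n takes
# exactly 2**n - 1 single-disk moves, which the recursion above reproduces.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1  # two half-tower transfers plus one disk move


assert count_moves(4) == 2**4 - 1  # 15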
| 214 |
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
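# Non-interactive check of the sorter above (odd-even transposition / "brick"
# sort): each pass alternates compare-swaps over even and odd index pairs.
assert odd_even_sort([5, 3, 8, 1, 9, 2]) == [1, 2, 3, 5, 8, 9]
assert odd_even_sort([]) == []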
| 214 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
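# Illustrative invocation sketch (not part of this file): the parser above is
# what backs shell commands such as
#
#     accelerate config    # interactive setup, registered by get_config_parser
#     accelerate env       # environment report, registered by env_command_parser
#     accelerate launch train.py --num_processes 2
#
# where `train.py` stands for a hypothetical user script.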
| 183 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def a_ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self ) -> Optional[Any]:
return
def a_ ( self ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def a_ ( self ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def a_ ( self ) -> List[str]:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def a_ ( self ) -> Optional[int]:
pass
def a_ ( self ) -> Dict:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def a_ ( self ) -> int:
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def a_ ( self ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ , **lowercase_ ):
return model(pixel_values=lowercase_ , **lowercase_ )
with self.subTest('JIT Enabled' ):
UpperCAmelCase = model_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 183 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
A__ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A__ = os.path.join(self.tmpdirname , _snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def _a ( self : Union[str, Any] , **_snake_case : Optional[int] ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Union[str, Any] , **_snake_case : Union[str, Any] ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : List[str] , **_snake_case : int ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Dict ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A__ = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
A__ = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = self.prepare_image_inputs()
A__ = image_processor(_snake_case , return_tensors='np' )
A__ = processor(images=_snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = processor(text=_snake_case )
A__ = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = self.prepare_image_inputs()
A__ = self.prepare_image_inputs()
A__ = processor(images=_snake_case , visual_prompt=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _a ( self : int ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(_snake_case )
A__ = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
| 9 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _UpperCamelCase (self : Dict ) -> Dict:
"""simple docstring"""
pass
def _UpperCamelCase (self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase (self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase (self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase (self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def _UpperCamelCase (self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(a )
self.assertIsNotNone(a )
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 531 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
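# Minimal usage sketch (the checkpoint id is an assumption, not from this file):
#
#     pipeline = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipeline(batch_size=1, num_inference_steps=50).images[0]
#     image.save("karras_ve_sample.png")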
| 188 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
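# Illustrative check of the derived attribute above: embed_dim=96 doubles once
# per downsampling stage, so hidden_size == 96 * 2 ** (4 - 1) == 768.
_config = DonutSwinConfig()
assert _config.hidden_size == 96 * 2 ** (len(_config.depths) - 1)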
| 188 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
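# Illustrative: the attribute_map above lets generic code read `hidden_size`
# and `num_attention_heads` even though this config stores those values under
# `d_model` and `decoder_attention_heads`.
_config = Speech2Text2Config(d_model=512, decoder_attention_heads=8)
assert _config.hidden_size == 512
assert _config.num_attention_heads == 8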
| 361 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
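# Worked example (not in the original): a 3 ohm resistance in series with a
# 4 ohm reactance gives |Z| = sqrt(3**2 + 4**2) = 5 ohm; the same relation is
# inverted to recover whichever quantity was passed as 0.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}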
| 361 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30_524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3_072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30_522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
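# Composition sketch (illustrative): build the joint config from explicit
# sub-configs and check that the text encoder gets wired to the vision width.
_text = BlipTextConfig(hidden_size=768)
_vision = BlipVisionConfig(hidden_size=768)
_blip = BlipConfig.from_text_vision_configs(_text, _vision)
assert _blip.text_config.encoder_hidden_size == _blip.vision_config.hidden_size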
| 121 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : Dict = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Any = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = pipe.dual_guided(
prompt='''first prompt''' ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
lowerCamelCase__ : Dict = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : Dict = generator.manual_seed(0 )
lowerCamelCase__ : List[str] = pipe.dual_guided(
prompt='''first prompt''' ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowercase_ ( self :Tuple ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : Tuple = '''cyberpunk 2077'''
lowerCamelCase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : str = torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = pipe.dual_guided(
prompt=__UpperCAmelCase ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ,).images
lowerCamelCase__ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase__ : Tuple = '''A painting of a squirrel eating a burger '''
lowerCamelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = pipe.text_to_image(
prompt=__UpperCAmelCase ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ).images
lowerCamelCase__ : List[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase__ : Any = pipe.image_variation(__UpperCAmelCase ,generator=__UpperCAmelCase ,output_type='''numpy''' ).images
lowerCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : str = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 121 | 1 |
'''simple docstring'''
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
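# Sanity check mirroring the Project Euler 12 statement: 28 = 1 + 2 + ... + 7
# is the first triangle number with over five divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6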
| 596 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
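# Background check (not in the original): the integrality test above comes from
# unfolding the cuboid, where the shortest surface route over an a x b x c box
# with c the longest side has length sqrt((a + b) ** 2 + c ** 2). The classic
# 6 x 5 x 3 example measures exactly 10.
assert sqrt((3 + 5) ** 2 + 6**2) == 10.0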
| 596 | 1 |
from manim import *
class lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
def __a ( self ):
_lowercase : Any = Rectangle(height=0.5 , width=0.5 )
_lowercase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowercase : List[Any] = [mem.copy() for i in range(6 )]
_lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowercase : Dict = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : str = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : int = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
_lowercase : Optional[Any] = Text('CPU' , font_size=2_4 )
_lowercase : Optional[int] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
_lowercase : Tuple = [mem.copy() for i in range(4 )]
_lowercase : Optional[int] = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : Union[str, Any] = Text('GPU' , font_size=2_4 )
_lowercase : List[Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
_lowercase : List[Any] = [mem.copy() for i in range(6 )]
_lowercase : Tuple = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : Any = Text('Model' , font_size=2_4 )
_lowercase : int = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
_lowercase : Union[str, Any] = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowercase : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
cpu_targs.append(A_ )
_lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowercase : Optional[int] = VGroup(*A_ ).arrange(A_ , buff=0 )
_lowercase : List[Any] = Text('Loaded Checkpoint' , font_size=2_4 )
_lowercase : Tuple = Group(A_ , A_ ).arrange(A_ , aligned_edge=A_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowercase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase : int = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
_lowercase : Any = MarkupText(
F"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowercase : Optional[int] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) , Write(A_ ) )
self.play(Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
_lowercase : str = []
_lowercase : Tuple = []
for i, rect in enumerate(A_ ):
_lowercase : int = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
first_animations.append(GrowFromCenter(A_ , run_time=1 ) )
_lowercase : Optional[int] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(*A_ )
self.wait()
| 703 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
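        # Note: labels are intentionally left out of this dict; the shared
        # TFModelTesterMixin tests feed it to every model class under test and
        # only exercise the forward pass.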
return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 677 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
                 vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
                 intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
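# A quick sanity sketch for prepare_mbart_inputs_dict above (the ids below are
# made up, not produced by any real tokenizer): for input_ids = [[5, 6, 2, pad]]
# the default attention_mask is 1 wherever input_ids != config.pad_token_id, and
# the decoder mask always attends to position 0, the decoder start token:
#
#   batch = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
#   batch["attention_mask"]    # -> [[1, 1, 1, 0]]
#   batch["head_mask"].shape   # -> (encoder_layers, encoder_attention_heads)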
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
@cached_property
    def tokenizer(self):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        """simple docstring"""
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation_en_ro(self):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 80 | """simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    '''simple docstring'''
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359 | 0 |
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
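# Worked example for the reflection step above: gray_code_sequence_string(2)
# prefixes "0" to ["0", "1"] giving ["00", "01"], then prefixes "1" to the
# reversed half giving ["11", "10"], so gray_code(2) == [0, 1, 3, 2] and
# consecutive values differ in exactly one bit.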
if __name__ == "__main__":
import doctest
doctest.testmod() | 298 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
    def test_metric_cpu_multi(self):
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
    def test_metric_gpu(self):
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
lowercase__ : Optional[Any] = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase , env=os.environ.copy() ) | 298 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image.Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 693 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
| 35 | 0 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
):
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result
def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
):
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter')


def _validate_lists(observations_space, states_space):
    _validate_list(observations_space, 'observations_space')
    _validate_list(states_space, 'states_space')


def _validate_list(_object, var_name):
    if not isinstance(_object, list):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg)


def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_dict(initial_probabilities, 'initial_probabilities', float)
    _validate_nested_dict(transition_probabilities, 'transition_probabilities')
    _validate_nested_dict(emission_probabilities, 'emission_probabilities')


def _validate_nested_dict(_object, var_name):
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name, value_type, nested=False):
    if not isinstance(_object, dict):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
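# Example from the problem statement: "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53,
# and as the 938th name in the sorted list it contributes 938 * 53 = 49714 to
# the total.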
if __name__ == "__main__":
print(solution())
| 696 | 1 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """simple docstring"""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """simple docstring"""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
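# Sanity check for the two evaluators: with poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0 both return 79800.0, but horner() needs only one multiplication per
# coefficient, whereas evaluate_poly() recomputes x**i for every term.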
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 458 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """simple docstring"""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """simple docstring"""
    cipher_text = ''
    i = 0
    for letter in message:
        if letter == ' ':
            cipher_text += ' '
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """simple docstring"""
    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == ' ':
            or_txt += ' '
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
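# Worked example for a single letter: encrypting 'T' (19) against key letter
# 'S' (18) gives (19 - 18) % 26 = 1 -> 'B', and decrypting 'B' gives
# (1 + 18 + 26) % 26 = 19 -> 'T', so original_text() exactly inverts
# cipher_text() under the same generated key.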
def main() -> None:
    """simple docstring"""
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"""Encrypted Text = {s}""")
    print(f"""Original Text = {original_text(s, key_new)}""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 458 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
'''simple docstring'''
    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None
def __repr__( self ):
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
'''simple docstring'''
    def __init__(self):
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node):
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node

    def remove(self, node):
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
'''simple docstring'''
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self, capacity: int):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}
def __repr__( self ):
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , snake_case ):
return key in self.cache
    def get(self, key):
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key, value):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
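# Illustrative use of the decorator above (the fib function here is a sketch,
# not part of the original module):
#
#   @LRUCache.decorator(100)
#   def fib(num):
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(100)           # fast: intermediate results are served from the cache
#   fib.cache_info()   # -> CacheInfo(hits=..., misses=..., capacity=100, ...)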
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
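# For example, gen_gaussian_kernel(3, sigma=1) returns a 3x3 array peaked at the
# centre. Note the leading constant is 1 / (2 * pi * sigma), so the weights are
# Gaussian-shaped but not normalised to sum to 1; the filtered image is scaled
# by the kernel sum accordingly.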
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
| 565 | 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = """ybelkada/fonts"""
def _check_torch_version() -> None:
    """simple docstring"""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
            "Pix2StructImageProcessor. Please upgrade torch." )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """simple docstring"""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
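# Shape sketch for the helper above (assumed example values): a (3, 32, 32)
# image tensor with 16x16 patches unfolds into a (1, 2, 2, 768) tensor -- a 2x2
# grid of flattened patches, each row carrying the 3 * 16 * 16 pixel values of
# one patch.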
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    """simple docstring"""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    """simple docstring"""
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["flattened_patches"]
    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors )

        return encoded_outputs
| 104 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
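# Example: exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5]. Every index
# pair (i, j) with i < j is compared exactly once, so the sort always performs
# n * (n - 1) / 2 comparisons: O(n^2) time, O(1) extra space, in place.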
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 319 | 0 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class a_ ( unittest.TestCase ):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
snake_case: List[Any] ={
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(a_ ) , a_ )
self.assertEqual(get_test_info.to_json(a_ ) , a_ )
| 347 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
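# Note (added for clarity): node_position records each vertex's current index in
# the heap, so the decrease-key step of Prim's algorithm (bottom_to_top) can find
# a vertex in O(1) before sifting it up.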
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree computed with Prim's algorithm."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
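# Hypothetical usage sketch (not part of the original script) -- building the
# adjacency list in code instead of from stdin; entries are [neighbor, weight]:
# example_graph = defaultdict(list)
# for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4), (2, 3, 1)]:
#     example_graph[u].append([v, w])
#     example_graph[v].append([u, w])
# prisms_algorithm(example_graph) is then expected to yield [(0, 1), (1, 2), (2, 3)].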
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 347 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
class TFMTaModel(TFTaModel):
    model_type = """mt5"""
    config_class = MTaConfig
class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = """mt5"""
    config_class = MTaConfig
class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = """mt5"""
    config_class = MTaConfig
| 32 |
from __future__ import annotations
def is_9_pandigital(candidate: int) -> bool:
    """Return True if candidate uses each digit 1-9 exactly once."""
    digits = str(candidate)
    return len(digits) == 9 and set(digits) == set('''123456789''')
def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product."""
    for base_num in range(99_99, 49_99, -1):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33, 99, -1):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
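# Sanity check (a sketch): 9327 * 100002 = 932718654, the concatenated product of
# 9327 with (1, 2), and it is 1-9 pandigital, so solution() returns 932718654.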
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 32 | 1 |
def count_inversions_bf(arr) -> int:
    """Count inversions by brute force, O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions divide-and-conquer (merge-sort) style, O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return merged, num_inversions
def _count_cross_inversions(p, q):
    """Merge sorted lists p and q, counting inversions that cross them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
    if i < len(p):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
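# Worked example (a sketch): _count_cross_inversions([2, 5, 10], [1, 2, 11])
# returns ([1, 2, 2, 5, 10, 11], 5) -- the five cross inversions are
# (2,1), (5,1), (10,1), (5,2) and (10,2).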
def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 703 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_MAP = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    """Configuration class for the BertAbs abstractive summarization model."""
    model_type = '''bertabs'''
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = '''luke'''
    def __init__( self , vocab_size=5_0267 , entity_vocab_size=50_0000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a rolling 1-D Pascal's triangle row."""
    c = [0 for _ in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
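# Expected output: 252, since C(10, 5) = 10! / (5! * 5!) = 252.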
| 105 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Break a Caesar cipher by minimising the chi-squared statistic over all shifts."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
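    # Worked example (a sketch, not part of the original module):
    # decrypt_caesar_with_chi_squared("crybd cdbsxq") is expected to recover
    # shift 10, decoding the ciphertext to "short string".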
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"a": 0.08497,
"b": 0.01492,
"c": 0.02202,
"d": 0.04253,
"e": 0.11162,
"f": 0.02228,
"g": 0.02015,
"h": 0.06094,
"i": 0.07546,
"j": 0.00153,
"k": 0.01292,
"l": 0.04025,
"m": 0.02406,
"n": 0.06749,
"o": 0.07507,
"p": 0.01929,
"q": 0.00095,
"r": 0.07587,
"s": 0.06327,
"t": 0.09356,
"u": 0.02758,
"v": 0.00978,
"w": 0.02560,
"x": 0.00150,
"y": 0.01994,
"z": 0.00077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
    )
| 580 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
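# Quick numeric check (a sketch): for apparent_power = 100 VA and power_factor = 0.8,
# real_power(100, 0.8) == 80.0 and reactive_power(100, 0.8) == 60.0,
# matching the 3-4-5 power triangle.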
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 580 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.0_1,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
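# Note (added for clarity): the acceptance rule above is the Metropolis criterion --
# a worse neighbor is accepted with probability e**(change / current_temp), so hot
# early iterations explore widely while cool late iterations reduce to hill climbing.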
if __name__ == "__main__":
    def test_fa(x, y):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
    def test_fa(x, y):
return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
| 417 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """CoNLL-2003-style named entity recognition."""
    def __init__(self, label_idx: int = -1) -> None:
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                    words = []
                    labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """CoNLL-2003-style chunking."""
    def __init__(self) -> None:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    """Part-of-speech tagging on CoNLL-U files."""
    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 664 | 0 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sine via its Maclaurin series."""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
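# Sanity check (a sketch): sin(90) should come out near 1.0 and sin(30) near 0.5,
# since the loop accumulates the Maclaurin series x - x**3/3! + x**5/5! - ...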
if __name__ == "__main__":
__import__("doctest").testmod()
| 232 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval DataLoaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )
return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate BERT on MRPC with the given config."""
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 232 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 2_5),)
    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self) -> None:
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self) -> None:
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, '''set_timesteps'''):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, '''set_timesteps'''):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self) -> None:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_3916) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_3916) < 1e-3
    def test_timesteps(self) -> None:
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self) -> None:
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='''deis''', solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self) -> None:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self) -> None:
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self) -> None:
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self) -> None:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self) -> None:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_3916) < 1e-3
    def test_full_loop_with_v_prediction(self) -> None:
        sample = self.full_loop(prediction_type='''v_prediction''')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
| 560 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('''audio''', None)
        text = kwargs.pop('''text''', None)
        text_target = kwargs.pop('''text_target''', None)
        audio_target = kwargs.pop('''audio_target''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['''input_values''']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['''input_ids''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('''input_values''', None)
        input_ids = kwargs.pop('''input_ids''', None)
        labels = kwargs.pop('''labels''', None)
        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['''input_ids''']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['''input_values''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 560 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
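# Escape-time note (added for clarity): get_distance returns 1.0 for points whose
# orbit never leaves the radius-2 disk within max_step iterations (inside the set)
# and smaller values the faster the orbit escapes.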
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 8_0_0, image_height: int = 6_0_0, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 5_0, use_distance_color_coding: bool = True, ) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 718 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=1_0_3 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list)}, but is'''
                    f''' {type(additional_special_tokens)}''')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset)]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f''' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}''')
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 577 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__( self ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,project_dim=5_1_2 ,pooler_fn="cls" ,learn_encoder=False ,use_attention_mask=True ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size ,config.project_dim)
        self.has_pre_transformation = getattr(config ,"has_pre_transformation" ,False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size ,config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size ,eps=config.layer_norm_eps)
        self.post_init()
    def forward( self ,input_ids: Optional[torch.Tensor] = None ,attention_mask: Optional[torch.Tensor] = None ,token_type_ids: Optional[torch.Tensor] = None ,position_ids: Optional[torch.Tensor] = None ,head_mask: Optional[torch.Tensor] = None ,inputs_embeds: Optional[torch.Tensor] = None ,encoder_hidden_states: Optional[torch.Tensor] = None ,encoder_attention_mask: Optional[torch.Tensor] = None ,output_attentions: Optional[bool] = None ,output_hidden_states: Optional[bool] = None ,return_dict: Optional[bool] = None ,):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids ,attention_mask=attention_mask ,token_type_ids=token_type_ids ,position_ids=position_ids ,head_mask=head_mask ,inputs_embeds=inputs_embeds ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,output_attentions=output_attentions ,output_hidden_states=True if self.has_pre_transformation else output_hidden_states ,return_dict=return_dict ,)
        if self.has_pre_transformation:
            sequence_output2 = outputs['''hidden_states'''][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2 ,last_hidden_state=outputs.last_hidden_state ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state ,last_hidden_state=outputs.last_hidden_state ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
| 333 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = '''636036'''
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['''id''']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the signature of the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
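# Added usage sketch: end-to-end example of fetching the latest daily-CI reports. The
# artifact name is an illustrative assumption; a token with `actions:read` scope is
# needed for anything non-public.
#
#   import os
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],  # hypothetical artifact name
#       output_dir="ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )
#   for artifact, files in reports.items():
#       print(artifact, sorted(files))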
| 501 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["GLPNFeatureExtractor"]
lowerCamelCase__ = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
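# Added note: a minimal sketch of what `_LazyModule` buys us. Importing the package is
# cheap; the torch-backed submodule is only imported when one of its attributes is first
# accessed (module path below assumes the usual transformers layout).
#
#   from transformers.models import glpn   # fast: nothing heavy imported yet
#   glpn.GLPNConfig                         # fine without torch
#   glpn.GLPNModel                          # first access lazily imports modeling_glpn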
| 202 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
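# Added usage note (paths are illustrative). Caveat: `--push_to_hub` is declared with
# `type=bool`, and argparse turns any non-empty string (including "False") into True, so
# the only command-line way to get False here is to pass an empty string:
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted --push_to_hub ""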
| 202 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPTNeoXJapanese model."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
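# Added usage sketch: unlike most configs, the MLP width here is derived rather than
# stored, as hidden_size * intermediate_multiple_size (2560 * 4 = 10240 with the defaults).
#
#   config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=8)
#   config.hidden_size * config.intermediate_multiple_size  # -> 4096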
| 559 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM.
    Force the weight and output scale factors to match by taking the max of (Q, K, V).
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip the amax of the GELU output quantizers to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the model's quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on mod's input and/or weight quantizers."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
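# Added usage sketch of the typical calibration flow with the helpers above. `model`,
# `args`, `build_model` and the calibration batches are assumed to come from the
# surrounding training script; this is an illustration, not the script's own API.
#
#   set_default_quantizers(args)              # must run before the model is created
#   model = build_model(args)                 # hypothetical model factory
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   # ...run a few representative batches through the model...
#   finish_calibration(model, args)           # load amax values, re-enable quantization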
| 559 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the unit square."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
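    # Added examples (a quick demo, not part of the doctests): each estimator's error
    # shrinks roughly like O(1/sqrt(iterations)).
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)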
| 716 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order (bigram) entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the input text into two dicts of counts: the frequency of single characters
    and the frequency of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
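# Added note: the quantities printed by `calculate_prob` are the first-order entropy
# H1 = -sum(p(c) * log2 p(c)) over characters, the second-order entropy H2 over bigrams,
# and H2 - H1, which approximates the conditional entropy of a character given the
# previous one. For example:
#
#   calculate_prob("the quick brown fox jumps over the lazy dog")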
def main():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 430 | 0 |
"""Compute GPT-2 attention-head entropy and importance scores, then mask and prune unimportant heads."""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on the head importance scores,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650).
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove the head weights entirely) based on the masking, and
    compare speed and score before/after pruning.
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
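# Added usage note (paths are illustrative; the data file is a whitespace-separated
# matrix of token ids, one sequence per row, as `np.loadtxt` above expects):
#
#   python run_prune_gpt.py --data_dir ./token_ids.txt --model_name_or_path gpt2 \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9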
| 94 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 10_24,
"gpt2-medium": 10_24,
"gpt2-large": 10_24,
"gpt2-xl": 10_24,
"distilgpt2": 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's tokenizers library), using byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
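# Added usage sketch: `add_prefix_space=True` is required for pre-tokenized input so that
# a word in the middle of a sentence gets the same byte-level token as " word" would.
#
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   tok("Hello world")["input_ids"]   # [15496, 995]
#   tok_ws = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok_ws(["Hello", "world"], is_split_into_words=True)  # each word tokenized with a leading space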
| 289 | 0 |
"""simple docstring"""
import argparse
lowercase = """docs/source/_static/js/custom.js"""
def update_custom_js(version):
    """Update the version table in the documentation."""
    with open(js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
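# Added usage note (the script filename is illustrative):
#
#   python update_custom_js.py --version 4.28.0
#
# rewrites the `const stableVersion = ...` line and appends `"v4.28.0": "v4.28.0",` as
# the last entry of `versionMapping`.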
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[int] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : str = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
def A__ ( *_UpperCAmelCase : str , **_UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : int , **_UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
        requires_backends(self, ["torch"])

    @classmethod
    def UpperCAmelCase(cls, *lowerCamelCase__, **lowerCamelCase__):
        requires_backends(cls, ["torch"])

    @classmethod
    def UpperCAmelCase(cls, *lowerCamelCase__, **lowerCamelCase__):
        requires_backends(cls, ["torch"])


# The original file repeated the class below verbatim for dozens of torch-backed
# classes. The renaming collapsed every copy onto the same class name, so each
# redefinition simply shadowed the previous one; a single definition is kept.
# In the upstream transformers "dummy objects" files this pattern is one
# placeholder per model class, with a `_backends = ["torch"]` attribute and
# classmethods `from_config` / `from_pretrained` (names restored from upstream;
# the obfuscated original used `__magic_name__` and `UpperCAmelCase`).
class SCREAMING_SNAKE_CASE_(metaclass=_lowercase):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
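# Usage sketch (illustrative, assuming torch is not installed): touching the
# placeholder raises a clear "this class requires the PyTorch library" error
# immediately, instead of an obscure AttributeError later.
#
#     obj = SCREAMING_SNAKE_CASE_()   # raises via requires_backends(..., ["torch"])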
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds the processor configuration and computes the expected output sizes."""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
        image_std: Optional[Union[float, List[float]]] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Mirror of the resizing logic in BridgeTowerImageProcessor.

        The shortest edge is scaled to `size`, the result is capped at
        1333/800 * size, and both dimensions are rounded down to a multiple of
        `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
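# Worked example of the sizing rule tested above (illustrative numbers): a
# 640x480 input with shortest_edge=288 scales by 288/480 = 0.6 to 384x288,
# which is under the int(1333/800 * 288) = 479 cap and already a multiple of
# size_divisor=32, so the expected output is height 288, width 384.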
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # Forward references to the next node on each level of this node's tower.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a tower height: geometric with success probability `self.p`."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value
        return None
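# Minimal usage sketch for SkipList (illustrative):
#     sl = SkipList()
#     sl.insert("a", 1)
#     sl.insert("b", 2)
#     assert sl.find("a") == 1
#     sl.delete("a")
#     assert sl.find("a") is None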
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))

    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))

    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
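    # Note: the printed tower layout varies between runs; node heights are random.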
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
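# Both operands are zero-padded to a common width so the generator expression
# can walk the digits pairwise; `"1" in (char_a, char_b)` is the per-position OR.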
if __name__ == "__main__":
import doctest
doctest.testmod()
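    # Quick sanity check (illustrative): must agree with Python's built-in `|`.
    assert binary_or(25, 32) == "0b" + format(25 | 32, "b")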
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [
                tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]
            ]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
class A(BaseImageProcessor):
    # The concrete model name was lost to the renaming; the structure follows the
    # standard transformers image-processor template (resize / center-crop /
    # rescale / normalize plus semantic-segmentation post-processing).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize the shorter edge of the image to `size["shortest_edge"]`,
        # preserving the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        # Fall back to the defaults configured at construction time.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model logits into one semantic segmentation map per image."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
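# Hedged usage sketch for the processor above (variable names are illustrative):
#     processor = A()
#     batch = processor(images=[pil_image], return_tensors="pt")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above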
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Sum of the per-class covariance matrices, normalised by the sample count."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Covariance of the class means around the global mean, weighted by class size."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
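# Sketch of intended use (assumed layout: `features` is (n_features, n_samples)):
#     reduced = principal_component_analysis(features, dimensions=2)
#     reduced.shape  # (2, n_samples)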
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # The projection dimension must be strictly smaller than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
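# Sketch (same (n_features, n_samples) layout; `classes` must exceed `dimensions`):
#     projected = linear_discriminant_analysis(features, labels, classes=3, dimensions=2)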
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
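    # The test_* functions above can also be collected by running pytest on this file.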
'''simple docstring'''
def rank_of_matrix(matrix) -> int:
    """Compute the rank of a matrix (modified in place) via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
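    # Illustrative check: a singular 2x2 matrix loses one pivot, so its rank is 1.
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1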
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the ways to replace black tiles in a row of `length` units with
    coloured tiles of length 2, 3 or 4, one colour per arrangement, using at
    least one coloured tile (this is the recurrence for Project Euler 116)."""
    # different_colour_ways_number[row_length][tile_length - 2] counts the
    # arrangements of one colour in a row of the given length.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
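# The triple loop is a DP over row lengths: for each colour (tile lengths 2, 3
# and 4) and each start offset, placing one tile contributes one arrangement
# plus every arrangement of the remaining suffix; the per-colour counts are
# summed at the end.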
if __name__ == "__main__":
print(F"""{solution() = }""")
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main() | 249 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
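
# Note: the CSS class queried above is specific to Yahoo Finance's markup at the
# time this scraper was written; if the page layout changes, soup.find() will
# return None and the function will raise an AttributeError.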
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}") | 249 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
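
# With this lazy-module pattern, importing the package only pays for building
# `_import_structure`; the heavy torch-backed submodules are loaded on first
# attribute access via `_LazyModule`.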
| 179 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
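
# Note: when masks are omitted, the defaults above attend to every non-padding
# token and keep all attention heads active, which is what the tests rely on.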
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_xla_mode(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 179 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that optionally rescales pixel values and pads
    images so that height and width are multiples of `pad_size`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
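
# A minimal usage sketch (hypothetical example, assuming a PIL image `img`; the
# class name Swin2SRImageProcessor is the upstream name this processor matches):
#
#     processor = Swin2SRImageProcessor(pad_size=8)
#     inputs = processor(images=img, return_tensors="pt")
#     # height and width of inputs["pixel_values"] are now multiples of 8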
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }

    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
 year={{2020}},
 title={{Facebook FAIR's WMT19 News Translation Task Submission}},
 author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
 booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""A trie (prefix tree) supporting insert, find and delete of words."""


class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
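
# insert(), find() and delete() all run in O(len(word)) time, and the trie
# stores at most one node per distinct prefix of the inserted words.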
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """
    Diagnoses the trie, making sure that it can insert, find and delete words.
    """
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 710 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
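
# For example, a checkpoint key like "bert.bert.encoder.layer.0.output.dense.weight"
# is renamed to "visual_bert.encoder.layer.0.output.dense.weight" by the
# ("bert.bert", "visual_bert") pair above.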
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 125 | 0 |