| code (stringlengths 82–53.2k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1) |
|---|---|---|---|---|
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` by rotating the head element."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` using in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
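    # Added sanity check (illustrative): both implementations should produce the
    # same 3! = 6 permutations; their orderings differ, so compare as sorted tuples.
    assert sorted(map(tuple, permute([1, 2, 3]))) == sorted(map(tuple, permute2([1, 2, 3])))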
| 100 |
from typing import Any, Callable, Dict, List, Optional, Union

import torch

from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Pipeline that runs the same prompt through Stable Diffusion v1.1-v1.4 for side-by-side comparison."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def _compare(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
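# Example usage (a sketch; the `custom_pipeline` loading path assumes this file
# is registered as a community pipeline named "stable_diffusion_comparison"):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe._compare("an astronaut riding a horse")
#     images = output.images  # one image per checkpoint, v1.1 through v1.4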
| 587 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path) -> None:
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
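# Example invocation (illustrative; the paths below are placeholders):
#
#     python convert_openai_original_tf_checkpoint_to_pytorch.py \
#         --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#         --pytorch_dump_folder_path ./openai-gpt-pytorch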
| 354 |
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # covered by check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
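# Minimal standalone use of the scheduler under test, outside the test harness
# (a sketch; `model_output` and `sample` stand in for real tensors and would be
# recomputed by a denoising model at every step):
#
#     scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25)
#     for t in scheduler.timesteps:
#         sample = scheduler.step(model_output, t, sample).prev_sample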
| 354 | 1 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter that processes one sample at a time."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
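# Example: a first-order filter with illustrative (not designed) coefficients.
# On a fresh filter the histories are zero, so process(1.0) returns
# (0 + b0 * 1.0) / a0 = 0.5 here.
#
#     >>> filt = IIRFilter(1)
#     >>> filt.set_coefficients([1.0, -0.5], [0.5, 0.0])
#     >>> filt.process(1.0)
#     0.5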
| 54 |
"""simple docstring"""
from math import pi, sqrt, tan
def _lowerCAmelCase(a : float ) -> float:
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _lowerCAmelCase(a : float , a : float , a : float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _lowerCAmelCase(a : float ) -> float:
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _lowerCAmelCase(a : float ) -> float:
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _lowerCAmelCase(a : float , a : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _lowerCAmelCase(a : float , a : float , a : float ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
_SCREAMING_SNAKE_CASE =(height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _lowerCAmelCase(a : float , a : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _lowerCAmelCase(a : float , a : float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(a , 2 ) * torus_radius * tube_radius
def _lowerCAmelCase(a : float , a : float ) -> float:
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _lowerCAmelCase(a : float ) -> float:
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _lowerCAmelCase(a : float , a : float ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _lowerCAmelCase(a : float , a : float , a : float ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
_SCREAMING_SNAKE_CASE =(sidea + sidea + sidea) / 2
_SCREAMING_SNAKE_CASE =sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _lowerCAmelCase(a : float , a : float ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _lowerCAmelCase(a : float , a : float , a : float ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _lowerCAmelCase(a : float ) -> float:
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _lowerCAmelCase(a : float , a : float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _lowerCAmelCase(a : float , a : float ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _lowerCAmelCase(a : int , a : float ) -> float:
if not isinstance(a , a ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(f"Square: {area_square(1_0) = }")
print(f"Triangle: {area_triangle(1_0, 1_0) = }")
print(f"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(f"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(f"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(f"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(f"Circle: {area_circle(2_0) = }")
print(f"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"Cube: {surface_area_cube(2_0) = }")
print(f"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(f"Sphere: {surface_area_sphere(2_0) = }")
print(f"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(f"Cone: {surface_area_cone(1_0, 2_0) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(f"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(f"Torus: {surface_area_torus(2_0, 1_0) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(f"Square: {area_reg_polygon(4, 1_0) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
| 255 | 0 |
"""Variance-preserving (VP) score-based SDE scheduler."""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving SDE scheduler for score-based generative models."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
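# Sketch of the reverse-diffusion sampling loop this scheduler is meant for
# (illustrative only; `score_model` is a placeholder for a trained score network
# and `x` for the current noisy sample):
#
#     scheduler = ScoreSdeVpScheduler()
#     scheduler.set_timesteps(1000)
#     for t in scheduler.timesteps:
#         score = score_model(x, t)
#         x, x_mean = scheduler.step_pred(score, x, t)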
| 714 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 415 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class __magic_name__ ( __snake_case ):
UpperCamelCase : Optional[int] = "xglm"
UpperCamelCase : str = ["past_key_values"]
UpperCamelCase : Tuple = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self , __magic_name__=2_5_6_0_0_8 , __magic_name__=2_0_4_8 , __magic_name__=1_0_2_4 , __magic_name__=4_0_9_6 , __magic_name__=2_4 , __magic_name__=1_6 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=2 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , **__magic_name__ , ):
"""simple docstring"""
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = ffn_dim
_lowerCAmelCase = num_layers
_lowerCAmelCase = attention_heads
_lowerCAmelCase = activation_function
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = layerdrop
_lowerCAmelCase = init_std
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
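# Example (an added illustration): the no-argument constructor reproduces the
# facebook/xglm-564M defaults above; the smaller variant's sizes are arbitrary.
#
#     config = XGLMConfig()
#     tiny_config = XGLMConfig(num_layers=2, d_model=128, ffn_dim=256, attention_heads=4)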
| 589 |
from manim import *
class Stage5(Scene):
    """Animates one forward pass where hooks move weights between CPU and GPU.

    Note: the direction and color arguments below are reconstructions. The layout
    (columns stacked with UP, rows with RIGHT, labels attached with DOWN) follows
    the element positions, and BLUE matches the 'Checkpoint' entry in the
    on-screen key; the input square and highlight colors are assumed choices.
    """

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_rects, disk_text)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(GREEN, opacity=1.0)  # GREEN is an assumed color for the input token
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)

        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))

        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=ORANGE, buff=0.5)  # ORANGE is an assumed highlight color
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
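# To render this scene with the standard manim CLI (assuming the file is saved
# as stage_5.py; -p previews the result and -ql selects low render quality):
#
#     manim -pql stage_5.py Stage5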
| 280 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase ( lowercase_ ):
lowercase = 'Salesforce/blip-image-captioning-base'
lowercase = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
lowercase = 'image_captioner'
lowercase = AutoModelForVisionaSeq
lowercase = ['image']
lowercase = ['text']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['vision'] )
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
return self.pre_processor(images=__UpperCamelCase ,return_tensors='pt' )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.model.generate(**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.pre_processor.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )[0].strip()
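# Example use through the tool's call interface (a sketch; requires Pillow and
# downloads the BLIP checkpoint on first use):
#
#     from PIL import Image
#     captioner = ImageCaptioningTool()
#     caption = captioner(Image.open("photo.jpg"))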
| 705 |
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
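# Once installed, the console entry point exposes the single subcommand
# registered above:
#
#     $ diffusers-cli env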
| 477 | 0 |
"""Implements `transformers-cli env`, which prints environment information for bug reports."""
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 48 |
"""Strand sort: repeatedly pull an increasing "strand" out of the input and merge it into the solution."""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 284 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
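# Example invocation (paths are placeholders):
#
#     transformers-cli convert --model_type bert \
#         --tf_checkpoint ./bert_model.ckpt \
#         --config ./bert_config.json \
#         --pytorch_dump_output ./bert_pytorch.bin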
| 444 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 444 | 1 |
"""simple docstring"""
class _lowerCAmelCase : # Public class to implement a graph
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = row
lowerCAmelCase__ :Optional[int] = col
lowerCAmelCase__ :int = graph
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowerCAmelCase__ :Tuple = [-1, 0, 1, -1, 1, -1, 0, 1]
lowerCAmelCase__ :Dict = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase )
def snake_case ( self ): # And finally, count all islands.
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowerCAmelCase__ :Union[str, Any] = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
count += 1
return count
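# Example (an added illustration): the 1s below form two 8-connected islands,
# one in the top-left corner and a single cell at the bottom-right.
#
#     grid = [
#         [1, 1, 0, 0],
#         [0, 1, 0, 0],
#         [0, 0, 0, 1],
#     ]
#     assert Graph(3, 4, grid).count_islands() == 2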
| 93 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""" )
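# Worked illustration (my comment, inferred from the mapping logic above): a fairseq
# key such as "encoder.layers.3.ffn1.w_1.weight" matches the MAPPING entry "ffn1.w_1";
# the layer index "3" is recovered from the text before the match, so the wildcard
# target "encoder.layers.*.ffn1.intermediate_dense" becomes
# "wav2vec2_conformer.encoder.layers.3.ffn1.intermediate_dense" with weight_type "weight".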
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish" )
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 486 | 0 |
"""simple docstring"""
def solution(n: int = 4_00_00_00) -> int:
    '''Return the sum of the even-valued Fibonacci terms that do not exceed n.'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
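# Quick sanity check (my addition): the even Fibonacci terms not exceeding 100 are
# 2, 8 and 34, so solution(100) should return 44.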
if __name__ == "__main__":
print(F'''{solution() = }''')
| 134 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    '''Deterministic trial division using the 6k +/- 1 optimisation.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1 ), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    '''Yield the primes in increasing order.'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_00_00_00) -> int:
    '''Return the sum of all primes below n.'''
    return sum(takewhile(lambda x: x < n, prime_generator() ) )
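# Quick sanity check (my addition): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) should return 17.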
if __name__ == "__main__":
print(F'''{solution() = }''')
| 134 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
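# A minimal sketch of the lazy-import pattern used above (my illustration, not part
# of transformers): the module object defers the real submodule import until an
# exported symbol is first accessed, then caches it on the module.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ is not hit again
        return value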
| 53 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    # the concrete test class is expected to set `feature_extraction_class`
    # and provide `feat_extract_dict`
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """simple docstring"""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 495 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 66 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 66 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
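    # Hypothetical invocation (paths are placeholders, my addition):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #     --albert_config_file ./albert_base/albert_config.json \
    #     --pytorch_dump_path ./albert_base_pytorch_model.bin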
| 635 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
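    # Walkthrough of the toy BPE setup above (my comment): "lower" is split into
    # characters, then the merges "l o" -> "lo", "lo w" -> "low" and "e r</w>" -> "er</w>"
    # apply in turn, giving ["low", "er</w>"], whose ids in the toy vocab are 14 and 15;
    # the appended "<unk>" maps to id 20, matching [14, 15, 20] in test_full_tokenizer.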
| 175 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
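# Hypothetical usage sketch (checkpoint name and surrounding API assumed, my addition):
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#   # inputs holds input_ids/attention_mask from the tokenizer plus pixel_values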
| 719 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Return the fully reduced sum of the three fractions x + y + z."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 169 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2) -> Optional[Any]:
    '''simple docstring'''

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    '''simple docstring'''

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
'''simple docstring'''
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Optional[int] = DummyModel()
snake_case__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Dict = dummy_dataloaders()
snake_case__ : List[str] = ProjectConfiguration(total_limit=1 , project_dir=__SCREAMING_SNAKE_CASE , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
snake_case__ : Any = Accelerator(project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Any = DummyModel()
snake_case__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders()
# Train baseline
snake_case__ : Union[str, Any] = Accelerator()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
snake_case__ : Dict = os.path.join(__SCREAMING_SNAKE_CASE , """initial""" )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : List[str] = model.a.item(), model.b.item()
snake_case__ : List[Any] = optimizer.state_dict()
snake_case__ : Optional[Any] = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : str = model.a.item(), model.b.item()
snake_case__ : Dict = optimizer.state_dict()
# Train partially
set_seed(4_2 )
snake_case__ : Dict = DummyModel()
snake_case__ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders()
snake_case__ : Optional[Any] = Accelerator()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.load_state(__SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Any = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save everything
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , """checkpoint""" )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# Load everything back in and make sure all states work
accelerator.load_state(__SCREAMING_SNAKE_CASE )
test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item()
snake_case__ : str = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Union[str, Any] = DummyModel()
snake_case__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Any = dummy_dataloaders()
snake_case__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
snake_case__ : Optional[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
snake_case__ : Any = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Optional[int] = model.a.item(), model.b.item()
snake_case__ : int = optimizer.state_dict()
# Train partially
set_seed(4_2 )
snake_case__ : Any = DummyModel()
snake_case__ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Union[str, Any] = dummy_dataloaders()
snake_case__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
((snake_case__) , (snake_case__)) : Any = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Tuple = model.a.item(), model.b.item()
snake_case__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = torch.tensor([1, 2, 3] )
snake_case__ : List[Any] = torch.tensor([2, 3, 4] )
snake_case__ : Union[str, Any] = DummyModel()
snake_case__ : Any = torch.optim.Adam(net.parameters() )
snake_case__ : Union[str, Any] = Accelerator()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as ve:
accelerator.register_for_checkpointing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : List[str] = DummyModel()
snake_case__ : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(__SCREAMING_SNAKE_CASE , step_size=1 , gamma=0.99 )
snake_case__ , snake_case__ : Dict = dummy_dataloaders()
snake_case__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
snake_case__ : Optional[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
snake_case__ : Any = scheduler.state_dict()
train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : str = DummyModel()
snake_case__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE , total_limit=2 )
# Train baseline
snake_case__ : Any = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __UpperCamelCase ( self ):
snake_case__ : Tuple = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = "/tmp/accelerate/state_checkpointing"
A_ : List[str] = DummyModel()
A_ : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
A_ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[str] = dummy_dataloaders()
A_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : Tuple = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 38 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
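# Hypothetical usage sketch (my addition; assumes the Transformers agents/tools runtime):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))  # returns an English description string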
| 78 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any=100 , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : Tuple=30 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : int=37 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Tuple=10 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : List[str]=3 , ) -> str:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ = num_patches + 1
def lowercase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = FlaxBeitModel(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = FlaxBeitForMaskedImageModeling(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = FlaxBeitForImageClassification(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = FlaxBeitForImageClassification(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
def lowercase_ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
a = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def lowercase_ ( self : Any ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowercase_ ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
@jax.jit
def model_jitted(__lowerCamelCase : str , **__lowerCamelCase : Dict ):
return model(pixel_values=__lowerCamelCase , **__lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE__ = model_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ = model_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowercase_ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowercase_ ( self : List[Any] ) -> Any:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
SCREAMING_SNAKE_CASE__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__lowerCamelCase , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE__ = np.ones((1, 196) , dtype=__lowerCamelCase )
# forward pass
SCREAMING_SNAKE_CASE__ = model(pixel_values=__lowerCamelCase , bool_masked_pos=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ = (1, 196, 8192)
self.assertEqual(logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __lowerCamelCase , atol=1e-2 ) )
@slow
def lowercase_ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__lowerCamelCase , return_tensors='''np''' )
# forward pass
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ = (1, 1000)
self.assertEqual(logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE__ = 281
self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase )
@slow
def lowercase_ ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__lowerCamelCase , return_tensors='''np''' )
# forward pass
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ = (1, 2_1841)
self.assertEqual(logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase )
| 708 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 3_0522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
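# Hedged usage sketch (added for illustration; the attention-window value is an
# assumption, not from the original file):
#
#   config = LongformerConfig(attention_window=[256] * 12)
#   onnx_config = LongformerOnnxConfig(config)
#   print(onnx_config.inputs)              # input_ids / attention_mask / global_attention_mask axes
#   print(onnx_config.default_onnx_opset)  # >= 14, because of the tril operator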
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    data_format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=data_format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column", type=str, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)"
        )
        run_parser.add_argument(
            "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from"
        )
        run_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)"
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
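# Hedged usage sketch (assumes this module is registered under the
# `transformers-cli` entry point, as in the upstream repo):
#
#   transformers-cli run --task sentiment-analysis --input reviews.csv \
#       --output predictions.csv --format csv --column text --device -1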
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
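# Hedged usage sketch (added for illustration): a minimal read loop built on the
# helpers above; it exits when Ctrl-C (the "interrupt" key code) is pressed.
#
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break
#       print(repr(key))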
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
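# Hedged usage sketch (added for illustration; the checkpoint name and the
# `waveform` variable are assumptions, not from the original file):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
#   batch["input_features"]  # log-mel features for the model
#   batch["labels"]          # tokenized transcript ids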
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
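# Note (added for clarity): `sherman_morrison` assumes `self` already holds
# A^(-1) and applies the Sherman-Morrison identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# which is why the code multiplies `self` (i.e. A^(-1)) with u and v_t and
# returns None when the denominator 1 + v^T A^(-1) u is zero (not invertible).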
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
import argparse
import os

import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder

MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # NOTE: the parameter paths on the left-hand side were lost in this dump;
    # they are reconstructed from the upstream diffusers conversion script and
    # should be treated as a best-effort sketch.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # NOTE: reconstructed parameter paths; see the note in `load_notes_encoder`.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # NOTE: reconstructed parameter paths; see the note in `load_notes_encoder`.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    continuous_encoder = SpectrogramContEncoder(input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    decoder = T5FilmDecoder(input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
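# Hedged usage (added for illustration; the script filename is an assumption):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion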
"""
Project Euler Problem 8: find the thirteen adjacent digits in the 1000-digit
number N below that have the greatest product.
"""
import sys

N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen consecutive digits in the string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
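# Worked mini-example (added for clarity): with a window of 4 over "73167176",
# the candidate products are 7*3*1*6=126, 3*1*6*7=126, 1*6*7*1=42, ...; the
# function above does the same with a window of 13 over the 1000-digit N.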
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
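# Hedged usage sketch (added for illustration; `args.output_dir` is an assumption):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(args.output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )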
def gray_code(bit_count: int) -> list:
    """Takes in an integer n and returns the n-bit Gray code sequence as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively builds the n-bit Gray code sequence as binary strings."""
    # Base cases are bit_count == 0 and bit_count == 1.
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
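# Worked example (added for clarity): gray_code(2) returns [0, 1, 3, 2], i.e. the
# bit strings "00", "01", "11", "10" -- consecutive codes differ in exactly one bit.
# gray_code(3) returns [0, 1, 3, 2, 6, 7, 5, 4].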
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Calculate the built-in voltage of a pn junction diode (in volts)."""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
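# Worked example (added for clarity): for N_D = 1e17, N_A = 1e17 and
# n_i = 1e10 (all in cm^-3) at T = 300 K,
#     V_bi = (kT / q) * ln(N_D * N_A / n_i^2) ~= 0.02585 * ln(1e14) ~= 0.83 V,
# which is what builtin_voltage(1e17, 1e17, 1e10) returns.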
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Zeller's congruence: return the day of the week for a Gregorian date
    given as a string in mm-dd-yyyy or mm/dd/yyyy format."""

    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
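# Worked example (added for clarity): zeller("01-31-2010") treats January as
# month 13 of 2009, giving c=20, k=9, t=28, u=5, v=2, x=40, z=75, w=35 and
# f = 35 % 7 = 0, i.e. "Sunday" -- which matches the calendar.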
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the `datasets` package and save it as
    train.source/train.target, val.source/val.target, test.source/test.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
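# Hedged usage (fire turns the function into a CLI; the script filename is an
# assumption):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 \
#       --save_dir ./wmt16-ro-en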
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
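# Note (added for clarity): `find_executable_batch_size` catches CUDA
# out-of-memory errors raised inside `inner_training_loop`, halves the batch
# size and retries, so a run that starts at batch_size=16 degrades to 8, 4, ...
# until a size fits in memory instead of crashing.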
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
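# Hypothetical smoke test for the mid block above (a sketch: the 32/4/77/128
# shapes are illustrative, and FlaxResnetBlock2D / FlaxTransformer2DModel from
# earlier in this module must be in scope):
#
#     import jax
#
#     block = FlaxUNetMidBlock2DCrossAttn(in_channels=32, num_attention_heads=4)
#     hidden_states = jnp.zeros((1, 8, 8, 32))        # NHWC feature map
#     temb = jnp.zeros((1, 128))                      # timestep embedding
#     encoder_hidden_states = jnp.zeros((1, 77, 32))  # text conditioning
#     params = block.init(jax.random.PRNGKey(0), hidden_states, temb, encoder_hidden_states)
#     out = block.apply(params, hidden_states, temb, encoder_hidden_states)
#     print(out.shape)  # (1, 8, 8, 32)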
| 88 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
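# Quick usage sketch for the config above (the printed values simply echo the
# defaults defined in __init__):
#
#     >>> config = VivitConfig()
#     >>> config.model_type, config.num_frames, config.tubelet_size
#     ('vivit', 32, [2, 16, 16])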
| 595 | 0 |
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 707 | '''simple docstring'''
def gray_code(bit_count: int) -> list:
    """
    Takes in an integer n and returns the n-bit Gray code sequence,
    with each code returned as an integer.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """
    Outputs the n-bit Gray sequence as a list of binary strings.
    The approach is recursive: prefix the (n-1)-bit sequence with "0",
    then append the same sequence reversed with a "1" prefix.
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
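# Quick demonstration of the defining Gray-code property: consecutive values
# differ in exactly one bit (bit_count=3 chosen arbitrarily).
if __name__ == "__main__":
    codes = gray_code(3)
    print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]
    assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))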
| 287 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    # Count, for every possible total, how many ways the dice can produce it.
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    # Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice; return the
    # probability that Peter's total beats Colin's, rounded to 7 decimals.
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)
if __name__ == "__main__":
print(f'''{solution() = }''')
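# Optional Monte Carlo sanity check for solution() (a sketch; the sample size
# is arbitrary and the exact answer is only approached statistically):
if __name__ == "__main__":
    import random

    wins = 0
    trials = 100_000
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    print(wins / trials)  # should hover around solution()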
| 588 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            generator=generator, output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
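# End-user sketch of the pipeline exercised above (illustrative only: it
# triggers a real model download, so it is kept out of the test suite):
#
#     pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#     out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75, guidance_scale=7.5)
#     out.images[0].save("fantasy_landscape.png")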
| 588 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
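# Standalone illustration of the stage-halving arithmetic the hidden-state
# checks above rely on (image_size=32, output_stride=32, 5 stages):
if __name__ == "__main__":
    image_size, num_stages = 32, 5
    divisor = 2
    for stage in range(num_stages):
        print(stage, image_size // divisor)  # 16, 8, 4, 2, 1
        divisor *= 2
    assert divisor // 2 == 32  # matches the model's output_stride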
| 352 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
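# Usage sketch: the attribute_map aliases make the generic names resolve to
# the encoder-specific values defined above.
#
#     >>> config = PegasusConfig()
#     >>> config.hidden_size           # aliased to d_model
#     1024
#     >>> config.num_attention_heads   # aliased to encoder_attention_heads
#     16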
| 352 | 1 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
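# Small end-to-end check of prims_algo (a sketch; the graph and weights are
# chosen arbitrarily, and the expected output follows this file's
# dist[node] + weight update rule):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 3)
    g.add_edge(2, 3, 10)
    g.add_edge(1, 3, 11)
    dist, parent = prims_algo(g)
    print(dist)    # {1: 0, 2: 3, 3: 11}
    print(parent)  # {1: None, 2: 1, 3: 1}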
| 4 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
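# Standalone sketch of the custom-timesteps API exercised above (descending
# integers; the values are arbitrary):
#
#     >>> scheduler = DDPMScheduler(num_train_timesteps=1000)
#     >>> scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
#     >>> scheduler.timesteps
#     tensor([100,  87,  50,   1,   0])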
| 284 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
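# Toy demonstration of the renaming helpers above (hypothetical keys, chosen
# only to show the mechanics; the real script applies them to a torch.hub
# checkpoint):
#
#     sd = OrderedDict({"backbone.0.body.conv1.weight": 0, "query_embed.weight": 1})
#     rename_key(sd, "query_embed.weight", "query_position_embeddings.weight")
#     sd = rename_backbone_keys(sd)
#     print(list(sd))
#     # ['backbone.conv_encoder.model.conv1.weight', 'query_position_embeddings.weight']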
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    """Find near-duplicate clusters in the dataset using MinHash + LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is a List[Dict] with the file metadata.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a: str, code_b: str) -> float:
    """Exact Jaccard similarity between the token sets of two code strings."""
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold: float) -> List[Dict]:
    """Reduce a cluster to its 'extremes': a minimal set of representatives such that every
    file in the cluster is within `jaccard_threshold` of one of them."""
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a)
    return extremes
def find_extremes(cluster_list, dataset: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    global _shared_dataset
    # Share the dataset via a module global so workers do not pickle it per task.
    _shared_dataset = dataset
    extremes_list = []
    partial_shared = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(partial_shared, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicates from the dataset, keeping one 'extreme' per similarity group."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters with which elements were kept and how many copies they had
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
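# Minimal usage sketch (the toy dataset below is illustrative and not part of the original
# script): two identical files should collapse into a single kept representative.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict(
        {
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
            "content": [
                "def add(a, b):\n    return a + b\n" * 4,
                "def add(a, b):\n    return a + b\n" * 4,  # exact duplicate of a.py
                "import sys\nprint(sys.version)\nprint('a completely different file')\n" * 2,
            ],
        }
    )
    ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(f"{len(ds)} files -> {len(ds_dedup)} files, {len(clusters)} duplicate cluster(s)")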
"""Pure-Python implementation of the SHA-256 hash, verified against hashlib."""
import argparse
import struct
import unittest
class SHA256:
    """Compute the SHA-256 digest of `data`; the hex digest is exposed as `self.hash`."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes)
        self.hashes = [
0X6a_09_e6_67,
0Xbb_67_ae_85,
0X3c_6e_f3_72,
0Xa5_4f_f5_3a,
0X51_0e_52_7f,
0X9b_05_68_8c,
0X1f_83_d9_ab,
0X5b_e0_cd_19,
]
        # Initialize round constants (first 32 bits of the fractional parts of the
        # cube roots of the first 64 primes)
        self.round_constants = [
0X42_8a_2f_98,
0X71_37_44_91,
0Xb5_c0_fb_cf,
0Xe9_b5_db_a5,
0X39_56_c2_5b,
0X59_f1_11_f1,
0X92_3f_82_a4,
0Xab_1c_5e_d5,
0Xd8_07_aa_98,
0X12_83_5b_01,
0X24_31_85_be,
0X55_0c_7d_c3,
0X72_be_5d_74,
0X80_de_b1_fe,
0X9b_dc_06_a7,
0Xc1_9b_f1_74,
0Xe4_9b_69_c1,
0Xef_be_47_86,
0X0f_c1_9d_c6,
0X24_0c_a1_cc,
0X2d_e9_2c_6f,
0X4a_74_84_aa,
0X5c_b0_a9_dc,
0X76_f9_88_da,
0X98_3e_51_52,
0Xa8_31_c6_6d,
0Xb0_03_27_c8,
0Xbf_59_7f_c7,
0Xc6_e0_0b_f3,
0Xd5_a7_91_47,
0X06_ca_63_51,
0X14_29_29_67,
0X27_b7_0a_85,
0X2e_1b_21_38,
0X4d_2c_6d_fc,
0X53_38_0d_13,
0X65_0a_73_54,
0X76_6a_0a_bb,
0X81_c2_c9_2e,
0X92_72_2c_85,
0Xa2_bf_e8_a1,
0Xa8_1a_66_4b,
0Xc2_4b_8b_70,
0Xc7_6c_51_a3,
0Xd1_92_e8_19,
0Xd6_99_06_24,
0Xf4_0e_35_85,
0X10_6a_a0_70,
0X19_a4_c1_16,
0X1e_37_6c_08,
0X27_48_77_4c,
0X34_b0_bc_b5,
0X39_1c_0c_b3,
0X4e_d8_aa_4a,
0X5b_9c_ca_4f,
0X68_2e_6f_f3,
0X74_8f_82_ee,
0X78_a5_63_6f,
0X84_c8_78_14,
0X8c_c7_02_08,
0X90_be_ff_fa,
0Xa4_50_6c_eb,
0Xbe_f9_a3_f7,
0Xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes: a 0x80 byte, zero padding, then the
        original bit length as a big-endian 64-bit integer."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Check the pure-Python implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    """Hash a string or the contents of a file from the command line."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
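# Quick sanity check (not part of the original CLI): the SHA-256 digest of b"abc" is a
# well-known test vector from FIPS 180-2.
if __name__ == "__main__":
    assert SHA256(b"abc").hash == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"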
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
__lowerCamelCase = """</w>"""
__lowerCamelCase = """@@ """
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Speech2Text2; without a merges file it can only decode."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a , a="<s>" , a="<pad>" , a="</s>" , a="<unk>" , a=False , a=None , **a , ) -> str:
"""simple docstring"""
super().__init__(
unk_token=a , bos_token=a , eos_token=a , pad_token=a , do_lower_case=a , **a , )
_A = do_lower_case
with open(a , encoding='''utf-8''' ) as vocab_handle:
_A = json.load(a )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(a , encoding='''utf-8''' ) as merges_handle:
_A = merges_handle.read().split('''\n''' )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(a , range(len(a ) ) ) )
_A = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        """Apply the learned BPE merges to a single whitespace-delimited token."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
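    # Worked example with hypothetical merges [("h", "a"), ("ha", "l")]: the token "hall"
    # starts as ("h", "a", "l", "l</w>"), is merged to ("ha", "l", "l</w>") and then to
    # ("hal", "l</w>"); after joining, stripping "</w>" and marking the intra-word split,
    # bpe("hall") would return "hal@@ l".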
    def _tokenize(self, text, **kwargs) -> List[str]:
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
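# Decoding sketch (the tiny vocab and temporary file below are made up for illustration):
# ids map to BPE tokens whose "@@ " continuation markers are removed when joining words.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
        json.dump({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hal@@": 4, "lo": 5}, tmp)
        vocab_path = tmp.name
    tokenizer = Speech2Text2Tokenizer(vocab_path)
    print(tokenizer.decode([4, 5]))  # expected: "hallo"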
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
__lowerCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__lowerCAmelCase = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"
        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)
        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2PhonemeCTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_a = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_a["text"])

            def recursive_check(list_or_dict_a, list_or_dict_b):
                if isinstance(list_or_dict_a, list):
                    [recursive_check(la, lb) for la, lb in zip(list_or_dict_a, list_or_dict_b)]
                self.assertEqual(list_or_dict_a, list_or_dict_b)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_a["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def __UpperCAmelCase ( self )-> Dict:
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def __UpperCAmelCase ( self )-> Any:
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def __UpperCAmelCase ( self )-> str:
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def __UpperCAmelCase ( self )-> Any:
pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def __UpperCAmelCase ( self )-> Dict:
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def __UpperCAmelCase ( self )-> Optional[Any]:
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a language modeling head."""

    # Prefix text used to help Transformer-XL and XLNet cope with short prompts.
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase=None , **__UpperCamelCase )-> int:
__lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=self.framework )
__lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase = generate_kwargs["max_new_tokens"]
else:
__lowerCAmelCase = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
__lowerCAmelCase = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase = inputs["attention_mask"][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
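# Example usage (downloads a checkpoint, so shown only as a sketch; "gpt2" is simply a
# common small choice of model):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Once upon a time", max_new_tokens=20, return_full_text=False)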
"""Compute the built-in voltage of a p-n junction from its doping concentrations."""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Return the built-in voltage V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2)."""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
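# Worked example (illustrative numbers): for N_d = N_a = 1e17 cm^-3 and n_i = 1e10 cm^-3,
# k_B * T / q is about 0.0259 V at T = 300 K and ln(1e34 / 1e20) is about 32.2, so the
# built-in voltage comes out near 0.83 V.
if __name__ == "__main__":
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))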
"""Transformer-XL model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration for a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
"""Tests for the MobileBERT tokenizers."""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
    def test_is_control(self):
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
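# How WordPiece splits a word (mirrors test_wordpiece_tokenizer above): the longest vocab
# piece matching a prefix of the word is taken first, and the remainder is matched against
# "##"-prefixed continuation pieces, e.g. "unwanted" -> ["un", "##want", "##ed"]; if no
# piece matches at some position, the whole word becomes "[UNK]".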
"""Shared pytest configuration for the test suite."""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared command line options (e.g. `--make-reports`) with pytest."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Write out the detailed test reports at the end of the run when requested."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
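# With these hooks registered, a report run can be requested from the command line, e.g.
# (the flag itself is defined by the shared diffusers testing utilities):
#
#     pytest tests/ --make-reports=run_tests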
def partition(m: int) -> int:
    """Count the ways to write `m` as a sum of at least two positive integers,
    i.e. the number of integer partitions of `m` excluding the trivial partition {m}."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowerCamelCase : Any = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
lowerCamelCase : Dict = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
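# Worked example: partition(5) == 6, matching the six ways to write 5 as a sum of at least
# two positive integers: 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.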
"""Import structure for the DistilBERT models and tokenizers (lazily loaded)."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
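# Usage sketch (hedged; assumes this module is importable as transformers.models.distilbert):
# _LazyModule defers the framework imports registered above until first attribute access,
# so the line below resolves only the config module, not the torch/TF/Flax backends:
#
#     from transformers.models.distilbert import DistilBertConfig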
| 460 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
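# Usage sketch (hedged; assumes a local XLM-R SentencePiece model file). Every spm id is
# shifted by fairseq_offset=1 so that ids 0-3 stay reserved for <s>/<pad>/</s>/<unk>:
#
#     tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#     pieces = tok._tokenize("Hello world")            # e.g. ['▁Hello', '▁world']
#     ids = tok.build_inputs_with_special_tokens([tok._convert_token_to_id(p) for p in pieces])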
| 569 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ as the base template does.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
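# Usage sketch (hedged; hypothetical label names): aligning against a dataset's features
# swaps the bare ClassLabel placeholder for the dataset's concrete labels:
#
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification().align_with_features(features)
#     # task.label_schema["labels"] now carries ClassLabel(names=["neg", "pos"])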
| 569 | 1 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {'''score''': ANY(float), '''label''': ANY(str)},
                    {'''score''': ANY(float), '''label''': ANY(str)},
                ], )

    @require_torch
    def test_small_model_pt(self):
        small_model = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10}, crop_size={'''height''': 10, '''width''': 10})
        video_classifier = pipeline(
            '''video-classification''', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ], )

    @require_tf
    def test_small_model_tf(self):
pass | 52 | """simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths can form a polygon, i.e. the longest side is
    strictly shorter than the sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
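# Quick check: check_polygon([6, 10, 5]) is True since 10 < 6 + 5, while
# check_polygon([1, 2, 3]) is False because 3 is not strictly less than 1 + 2.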
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
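# Each element of the printed list is a (filled_sentence, probability, predicted_token)
# triple, so topk=3 yields the three most likely completions of the masked sentence.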
| 13 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
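# Shape sketch (hedged; hypothetical sizes, NHWC layout as used above): initialising the
# block with dummy tensors infers every parameter shape, including the 1x1 shortcut that
# appears because in_channels != out_channels:
#
#     block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#     hidden = jnp.zeros((1, 8, 8, 32))
#     temb = jnp.zeros((1, 128))
#     params = block.init(jax.random.PRNGKey(0), hidden, temb)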
| 13 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
return self.d_model | 6 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="""cpu""")
    args = m2m_100["""args"""]
    state_dict = m2m_100["""model"""]
    lm_head_weights = state_dict["""decoder.output_projection.weight"""]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(""",""")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""relu""", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=2_00, use_cache=True, decoder_start_token_id=2, early_stopping=True, )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            F' but all the following weights are missing {missing}')

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 532 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
def solution(limit=1000000):
    """Count n < limit for which x**2 - y**2 - z**2 == n has exactly ten solutions with
    x, y, z a decreasing arithmetic progression of positive integers."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and y < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
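# Why the loop works: for an arithmetic progression x = y + d, z = y - d, the target
# n = x**2 - y**2 - z**2 reduces to y * (4d - y). With first_term = y and n a multiple of y,
# first_term + n / first_term equals 4d, hence the divisibility-by-4 test; the bounds
# y > d and y < 4d keep z and n positive.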
if __name__ == "__main__":
print(f'{solution() = }')
| 252 | 0 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over elements of ``nums`` with no two chosen elements adjacent."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
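# Example: maximum_non_adjacent_sum([1, 2, 4, 7]) == 9 (pick 2 and 7); adjacent
# elements such as 4 and 7 are never combined.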
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 281 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output
| 236 | import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is exactly the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ''''''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['''It was the best of times.''']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 236 | 1 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit divisible by ``divisor`` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (1_0 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
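# Example: least_divisible_repunit(7) == 6, since R(6) = 111111 = 7 * 15873 and no shorter
# repunit is divisible by 7. Multiples of 2 or 5 return 0 because every repunit ends in 1.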
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Return the least odd divisor n for which least_divisible_repunit(n) exceeds ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
 | 58 | class Node:
    """A binary search tree node used by ``tree_sort``."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
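# Note: equal keys overwrite the stored value in Node.insert, so duplicates collapse —
# tree_sort([3, 1, 3]) returns [1, 3]. Average cost is O(n log n); already-sorted input
# degenerates the tree and costs O(n**2).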
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 520 | 0 |
'''simple docstring'''
import base64


def base32_encode(string: str) -> bytes:
    # b32encode is assumed here; any of base64's b16/b32/b64 helpers would fit this
    # wrapper equally, so treat the codec choice as an assumption.
    return base64.b32encode(string.encode('utf-8'))


def base32_decode(encoded_bytes: bytes) -> str:
    return base64.b32decode(encoded_bytes).decode('utf-8')


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
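# Round trip: base32_decode(base32_encode(s)) == s for any str s. For the demo above the
# encoded value is b'JBSWY3DPEBLW64TMMQQQ====' (RFC 4648 base32 of "Hello World!").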
| 238 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 238 | 1 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(""".""")  # param is unused

        if layer == "0":
            new_name = old_name.replace("""0""", """convolution1""")
        elif layer == "1":
            new_name = old_name.replace("""1""", """batchnorm_before""")
        elif layer == "3":
            new_name = old_name.replace("""3""", """convolution2""")
        else:
            new_name = old_name.replace("""4""", """batchnorm_after""")

    if "network" in old_name and re.search(R"""\d\.\d""", old_name):
        two_digit_num = R"""\b\d{2}\b"""
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(R"""\d\.\d\d.""", old_name).group()
        else:
            match = re.search(R"""\d\.\d.""", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, """""")
            trimmed_name = trimmed_name.replace("""network""", match[0] + """.meta4D_layers.blocks.""" + match[2:-1])
            new_name = """intermediate_stages.""" + trimmed_name
        else:
            trimmed_name = old_name.replace(match, """""")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("""network""", """meta4D_layers.blocks.""" + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("""network""", """meta3D_layers.blocks.""" + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("""norm1""", """layernorm1""")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("""norm2""", """layernorm2""")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("""fc1""", """linear_in""")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("""fc2""", """linear_out""")
            new_name = """last_stage.""" + trimmed_name
    elif "network" in old_name and re.search(R""".\d.""", old_name):
        new_name = old_name.replace("""network""", """intermediate_stages""")

    if "fc" in new_name:
        new_name = new_name.replace("""fc""", """convolution""")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("""norm1""", """batchnorm_before""")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("""norm2""", """batchnorm_after""")
    if "proj" in new_name:
        new_name = new_name.replace("""proj""", """projection""")
    if "dist_head" in new_name:
        new_name = new_name.replace("""dist_head""", """distillation_classifier""")
    elif "head" in new_name:
        new_name = new_name.replace("""head""", """classifier""")
    elif "patch_embed" in new_name:
        new_name = """efficientformer.""" + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("""norm""", """layernorm""")
        new_name = """efficientformer.""" + new_name
    else:
        new_name = """efficientformer.encoder.""" + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="""cpu""")["""model"""]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = """_""".join(checkpoint_path.split("""/""")[-1].split(""".""")[0].split("""_""")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={"""shortest_edge""": image_size}, crop_size={"""height""": crop_size, """width""": crop_size}, resample=pillow_resamplings["""bicubic"""], )
    pixel_values = processor(images=image, return_tensors="""pt""").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["""bicubic"""]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1_0_0_0)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8])
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7])
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7')

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
    processor.save_pretrained(pytorch_dump_path)
    print(f'Processor successfully saved at {pytorch_dump_path}')

    if push_to_hub:
        print("""Pushing model to the hub...""")
        model.push_to_hub(
            repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message="""Add model""", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message="""Add image processor""", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 96 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a package name or parsed version against `requirement_version` using `operation`."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
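# Usage sketch (hedged): is_torch_version(">=", "1.12") checks the installed torch, while
# compare_versions("numpy", "<", "2.0") first resolves an installed package by name.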
| 342 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""input_values""": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 712 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None
def logger():
    """Returns the module-level logger, creating it on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock, polling until success or until *timeout* seconds elapse."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """Releases the lock; it is fully released only when the nesting counter reaches 0 (or *force* is set)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Alias for the lock that should be used on the current platform.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available") | 649 | 0 |
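# Usage sketch (an illustrative addition, not part of the vendored module): the
# platform-specific class is aliased to `FileLock` above, so callers only need
# the context-manager form. The lock/target paths below are made up for the example.
if __name__ == "__main__":
    example_lock = FileLock("example.txt.lock", timeout=5)
    with example_lock:
        # Only one process at a time can run this block.
        with open("example.txt", "a") as f:
            f.write("exclusive write\n")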
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the twentieth century (Project Euler 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 January 1901 was a Sunday, so stepping by 7 stays on Sundays
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
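# Illustrative cross-check (an addition, not part of the original solution): the
# standard library's calendar arithmetic should agree with the hand-rolled date
# stepping above.
def _cross_check() -> int:
    import datetime

    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6  # Monday is 0, so 6 is Sunday
    )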
if __name__ == "__main__":
print(solution()) | 296 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
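# Quick sanity check (an illustrative addition): pigeonhole_sort mutates its
# argument in place, so the result should agree with the built-in sorted().
def _sanity_check() -> None:
    sample = [8, 3, 2, 7, 4, 6, 8]
    expected = sorted(sample)
    pigeonhole_sort(sample)
    assert sample == expected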
if __name__ == "__main__":
main() | 296 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class a__ ( A__ ):
UpperCAmelCase__ = '''roberta-prelayernorm'''
def __init__( self :str , _lowerCamelCase :List[Any]=50_265 , _lowerCamelCase :List[Any]=768 , _lowerCamelCase :str=12 , _lowerCamelCase :Optional[Any]=12 , _lowerCamelCase :List[str]=3_072 , _lowerCamelCase :Optional[Any]="gelu" , _lowerCamelCase :Dict=0.1 , _lowerCamelCase :str=0.1 , _lowerCamelCase :str=512 , _lowerCamelCase :List[Any]=2 , _lowerCamelCase :List[Any]=0.02 , _lowerCamelCase :Optional[int]=1E-1_2 , _lowerCamelCase :int=1 , _lowerCamelCase :str=0 , _lowerCamelCase :int=2 , _lowerCamelCase :Optional[Any]="absolute" , _lowerCamelCase :List[str]=True , _lowerCamelCase :str=None , **_lowerCamelCase :Any , ):
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_ : Dict =vocab_size
UpperCamelCase_ : str =hidden_size
UpperCamelCase_ : Optional[int] =num_hidden_layers
UpperCamelCase_ : Optional[int] =num_attention_heads
UpperCamelCase_ : List[Any] =hidden_act
UpperCamelCase_ : Any =intermediate_size
UpperCamelCase_ : Optional[int] =hidden_dropout_prob
UpperCamelCase_ : Tuple =attention_probs_dropout_prob
UpperCamelCase_ : str =max_position_embeddings
UpperCamelCase_ : List[str] =type_vocab_size
UpperCamelCase_ : Tuple =initializer_range
UpperCamelCase_ : Any =layer_norm_eps
UpperCamelCase_ : str =position_embedding_type
UpperCamelCase_ : Any =use_cache
UpperCamelCase_ : str =classifier_dropout
class a__ ( A__ ):
@property
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase_ : str ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase_ : Any ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 395 |
"""simple docstring"""
def A_ ( __lowercase ):
if not isinstance(__lowercase , __lowercase ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(__lowercase ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(__lowercase ) == 1:
return True
UpperCamelCase_ : Tuple =series[1] - series[0]
for index in range(len(__lowercase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def A_ ( __lowercase ):
if not isinstance(__lowercase , __lowercase ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(__lowercase ) == 0:
raise ValueError('Input list must be a non empty list' )
UpperCamelCase_ : List[Any] =0
for val in series:
answer += val
return answer / len(__lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 395 | 1 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 437 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 437 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating() | 302 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: shortest distance in a graph whose edge weights are all 0 or 1."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # A 0-weight edge keeps the distance, so its endpoint goes to the front of the deque.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
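# Usage sketch (an illustrative addition): build a tiny graph with 0/1 weights
# and query a shortest path; the topology below is made up for the example.
def _zero_one_bfs_example() -> None:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    assert g.get_shortest_path(0, 2) == 1  # e.g. 0 -> 3 (cost 1) -> 2 (cost 0)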
if __name__ == "__main__":
import doctest
doctest.testmod() | 302 | 1 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to num using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
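# Worked example (an illustrative addition): only multiples of primes up to
# sqrt(num) are marked; the trailing loop collects the larger surviving primes.
def _sieve_example() -> None:
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]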
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 188 |
_a : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_a : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_a : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 145 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text in `filename` between a line starting with `start_prompt` and one starting with `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased identifier into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in the index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 708 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units via their factors relative to one joule."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
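# Worked examples (an illustrative addition): every conversion routes through
# joules, i.e. value * factor[from_type] / factor[to_type].
def _conversion_examples() -> None:
    assert energy_conversion("kilowatthour", "joule", 1.0) == 3600000.0
    assert abs(energy_conversion("joule", "kilocalorie_nutr", 4186800.0) - 1.0) < 1e-9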
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
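# Illustrative note (an addition): the bound 7 * 9! + 1 is safe because an
# n-digit number is at least 10 ** (n - 1) while its digit-factorial sum is at
# most n * 9!, and the two bounds cross before eight digits.
def _bound_check() -> None:
    assert 8 * factorial(9) < 10**7  # so no 8-digit number can qualify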
if __name__ == "__main__":
print(F'''{solution() = }''')
| 520 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 422 | 0 |
"""simple docstring"""
def snake_case ( lowerCAmelCase_ ) -> List[str]:
_snake_case = []
_snake_case = set({'''(''', '''[''', '''{'''} )
_snake_case = set({''')''', ''']''', '''}'''} )
_snake_case = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(lowerCAmelCase_ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCAmelCase_ ) == 0 or (len(lowerCAmelCase_ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCAmelCase_ ) == 0
def snake_case ( ) -> Any:
_snake_case = input('''Enter sequence of brackets: ''' )
if is_balanced(lowerCAmelCase_ ):
print(lowerCAmelCase_ , '''is balanced''' )
else:
print(lowerCAmelCase_ , '''is not balanced''' )
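# Worked examples (an illustrative addition): characters other than brackets are
# ignored, and closing brackets must mirror the opening order exactly.
def _bracket_examples() -> None:
    assert is_balanced("([]{})")
    assert not is_balanced("([)]")
    assert is_balanced("a(b)c")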
if __name__ == "__main__":
main()
| 404 |
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Dynamic programming: minimum count of perfect squares summing to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
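# Worked example (an illustrative addition): by Lagrange's four-square theorem
# the answer is at most 4; e.g. 12 = 4 + 4 + 4 needs three squares while 16 is
# itself a perfect square.
def _dp_example() -> None:
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(16) == 1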
if __name__ == "__main__":
import doctest
doctest.testmod()
| 404 | 1 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 692 | import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 424 | 0 |
"""simple docstring"""
from __future__ import annotations
class Node:
    """A Node has a data variable and pointers to Nodes to its left and right."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """A full binary tree: every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main() | 700 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError() | 128 | 0 |
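# Minimal concrete sketch (an illustrative addition): a made-up "hello"
# subcommand showing how implementations typically fill in the two hooks.
class _HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # In the real CLI, `parser` is the object returned by ArgumentParser.add_subparsers().
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: _HelloCommand())

    def run(self):
        print("hello")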
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounding it to `digit_amount` digits when that argument is positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3)) | 407 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ : List[Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , ) | 407 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)


_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
_lowercase = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
_lowercase = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_lowercase = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'" )
| 526 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
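

# Minimal standalone usage sketch of the scheduler under test (independent of the
# harness above; shapes are arbitrary placeholders):
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)
#     noisy = torch.randn(1, 3, 8, 8)
#     model_output = torch.randn(1, 3, 8, 8)  # stand-in for a UNet's predicted noise
#     prev_sample = scheduler.step(model_output, scheduler.timesteps[0], noisy).prev_sample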
| 526 | 1 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """
    Takes in an integer n and returns the n-bit Gray code sequence as a list of
    integers.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """
    Outputs the n-bit Gray code sequence as a list of bit strings, built
    recursively from the (n-1)-bit sequence.
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
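
# Worked example (a sketch): for two bits the string sequence is
# ["00", "01", "11", "10"], so gray_code(2) returns [0, 1, 3, 2].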
| 378 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int = 50 ) -> int:
__SCREAMING_SNAKE_CASE :List[str] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
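

# Quick sanity check (a sketch): with unit squares plus tiles of length 2-4,
# a row of five units can be tiled in exactly 15 ways, so solution(5) == 15.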
if __name__ == "__main__":
print(f'{solution() = }') | 498 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
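
# Note: with this lazy-module pattern, `from ... import TimmBackbone` only triggers
# the heavy torch import the first time the attribute is actually resolved.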
| 414 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
UpperCamelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.tokenizer_class(self.vocab_file )
UpperCamelCase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
| 414 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 231 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
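

# Usage sketch (hypothetical column names): map a dataset's own columns onto the
# template's canonical "text"/"summary" schema.
#     task = Summarization(text_column="article", summary_column="highlights")
#     task.column_mapping  # {"article": "text", "highlights": "summary"}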
| 696 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __A ( self ) -> Tuple:
super().setUp()
SCREAMING_SNAKE_CASE = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
SCREAMING_SNAKE_CASE = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def __A ( self , lowerCAmelCase__ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = '''</s>'''
SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def __A ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
SCREAMING_SNAKE_CASE = en_de_tokenizer(['I am a small frog'] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = [x.name for x in Path(UpperCamelCase__ ).glob('*' )]
self.assertIn('source.spm' , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = tok(
['I am a small frog' * 1_000, 'I am a small frog'] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = tok(['I am a tiny frog', 'I am a small frog'] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def test_tokenizer_integration(self):
SCREAMING_SNAKE_CASE = {'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
SCREAMING_SNAKE_CASE = '''Tämä on testi'''
SCREAMING_SNAKE_CASE = '''This is a test'''
SCREAMING_SNAKE_CASE = [76, 7, 2_047, 2]
SCREAMING_SNAKE_CASE = [69, 12, 11, 940, 2]
SCREAMING_SNAKE_CASE = tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer(text_target=UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) | 712 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
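

# Usage sketch (hypothetical schema): aligning the template re-binds the "labels"
# feature to the dataset's actual ClassLabel so label names survive the cast.
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification().align_with_features(features)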
| 327 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout | 86 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
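

# Worked example (a sketch): "IIIIIIIII" parses to 9, which regenerates as the
# minimal form "IX" -- a saving of seven characters for that line.
#     parse_roman_numerals("IIIIIIIII")  # -> 9
#     generate_roman_numerals(9)         # -> "IX"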
if __name__ == "__main__":
print(f"{solution() = }")
| 659 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
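

# Minimal usage sketch (assumes a 1-D float waveform sampled at 16 kHz):
#     import numpy as np
#     extractor = WhisperFeatureExtractor()
#     audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     features = extractor(audio, sampling_rate=16_000, return_tensors="np")
#     features["input_features"].shape  # (1, 80, 3000): 80 mel bins, 30 s of frames at hop 160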
| 683 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
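

# For reference: bytes_to_unicode assigns every byte a printable unicode stand-in;
# the space byte 0x20 maps to "Ġ", which is why byte-level BPE tokens for words
# preceded by a space carry a leading "Ġ".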
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
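

# Usage sketch (assumes a vocab.json/merges.txt pair on disk, e.g. from a saved
# Longformer checkpoint):
#     tokenizer = LongformerTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokenizer.tokenize("Hello world")  # byte-level BPE, e.g. ["Hello", "Ġworld"]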
| 683 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _snake_case ( A ) -> str:
lowerCAmelCase__ = int(A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _snake_case ( A ) -> List[str]:
return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage | 90 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
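# Illustrative sketch (not part of the original script): the three usage
# patterns the function above detects in a modeling file. `width` and
# `hidden_size` are placeholder names.
#
#     width = config.hidden_size
#     width = getattr(config, "hidden_size", 768)
#     width = getattr(
#         self.config, "hidden_size"
#     )  # multi-line form, caught by the `re.search` branch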
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 562 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 719 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(''',''' )
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(''':''' )
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
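# Illustrative example (an assumption about usage, not part of the original
# module): with step_rules="1:10,10:0.1,0.01", the lr multiple is 10 before
# step 1, 0.1 before step 10, and 0.01 for every later step.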
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) )
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch )
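# Minimal usage sketch (illustrative; the optimizer, model and step counts are
# assumptions, not part of this module):
#
#     from torch.optim import AdamW
#     optimizer = AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     for step in range(10_000):
#         ...  # forward/backward and optimizer.step()
#         lr_scheduler.step()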
| 485 | 0 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
__lowerCamelCase = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCamelCase = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCamelCase = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence''' ),
                    '''references''': datasets.Value('''string''', id='''sequence''' ),
                } ), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ], )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
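# Worked example (illustrative) for the pairs in the docstring above:
# "this is the prediction" vs "this is the reference" aligns with 1
# substitution and 3 hits; "there is an other sample" vs "there is another
# one" aligns with 2 substitutions, 1 insertion and 2 hits. Summed over both
# pairs: incorrect = 1 + 3 = 4 and total = 4 + 4 = 8, so WER = 4 / 8 = 0.5.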
| 467 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/table-transformer-detection""": (
        """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
    ),
}
class TableTransformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''table-transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2_048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
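# Minimal usage sketch (illustrative): the `attribute_map` above lets generic
# names resolve to the DETR-style ones.
#
#     config = TableTransformerConfig()
#     config.num_attention_heads  # 8, aliased to `encoder_attention_heads`
#     config.hidden_size          # 256, aliased to `d_model`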
| 451 | 0 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    '''simple docstring'''
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    '''simple docstring'''
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    '''simple docstring'''
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    '''simple docstring'''
    print(f"""{description}:""" )
    print(f"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count()):
        print(f"""- GPU {i} allocated: {measures[str(i)]:.2f}MiB""" )
        peak = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 123 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    '''simple docstring'''
    pass
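# Illustrative note (not part of the test file): with the toy merges above,
# "lower" is tokenized by applying "l o" -> "lo", then "lo w" -> "low", then
# "e r</w>" -> "er</w>", yielding exactly ["low", "er</w>"] as asserted in
# test_full_tokenizer.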
| 123 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_albert'''] = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_albert_fast'''] = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_albert'''] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_albert'''] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_albert'''] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 91 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."} )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."} )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."} )
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."} )
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."} )
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."} )
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."} )
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."} )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."} )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."} )
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."} )
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."} )
    save_checkpoint_steps: Optional[int] = field(
        default=1_024, metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."}, )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."} )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."} )
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."} )
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."} )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."} )
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."} )
    num_tasks: Optional[int] = field(
        default=None, metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."}, )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."} )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."} )
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."} )
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."} )
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."} )
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."} )
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."} )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."} )
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."} )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
    device_int: Optional[int] = field(
        default=-1, metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        }, )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None, metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        }, )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."} )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."} )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."} )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."} )
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."} )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}, )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."} )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."} )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."} )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."} )
    n_examples: Optional[int] = field(default=200_000, metadata={"help": "Number of examples to train tokenizer on."} )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Number of examples to train the tokenizer on."} )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."} )
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."} )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."} )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."} )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."} )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."} )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."} )
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."} )
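# Minimal usage sketch (illustrative; `HfArgumentParser` is the standard way
# such dataclasses are consumed in the accompanying scripts):
#
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args_into_dataclasses()[0]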
| 326 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", )
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy", ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 226 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
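# A minimal, self-contained sketch of the lazy-import pattern used above. This is an
# illustration with a hypothetical package layout, not the transformers implementation:
# _LazyModule defers the heavy torch-dependent import until first attribute access,
# which plain Python can approximate with a module-level __getattr__ (PEP 562).
import importlib

_LAZY_ATTRS = {"IBertModel": ".modeling_ibert"}  # attribute -> submodule (hypothetical)

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")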
| 226 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order low-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order high-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order band-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order all-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order peaking EQ biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order low-shelf biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order high-shelf biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
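# Usage sketch for the factories above. It assumes the companion IIRFilter class
# exposes a per-sample `process(sample)` method, as in TheAlgorithms' audio_filters
# package; if that API differs, only the last line changes.
if __name__ == "__main__":
    samplerate = 48_000
    filt = make_lowpass(1_000, samplerate)  # 1 kHz cutoff, Butterworth Q = 1/sqrt(2)
    tone = [sin(tau * 5_000 * n / samplerate) for n in range(64)]  # 5 kHz test tone
    filtered = [filt.process(sample) for sample in tone]  # strongly attenuated output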
| 342 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self ) -> int:
snake_case = 0
def UpperCamelCase ( self ) -> Optional[int]:
snake_case = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A__ , A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(A__ ) / '''preprocessor_config.json'''
snake_case = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
snake_case = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def UpperCamelCase ( self ) -> str:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(A__ ) / '''preprocessor_config.json'''
snake_case = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
snake_case = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def UpperCamelCase ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case = Path(A__ ) / '''preprocessor_config.json'''
snake_case = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
snake_case = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def UpperCamelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
snake_case = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def UpperCamelCase ( self ) -> Optional[int]:
with self.assertRaisesRegex(
A__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case = AutoImageProcessor.from_pretrained('''clip-base''' )
def UpperCamelCase ( self ) -> int:
with self.assertRaisesRegex(
A__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case = AutoImageProcessor.from_pretrained(A__ , revision='''aaaaaa''' )
def UpperCamelCase ( self ) -> str:
with self.assertRaisesRegex(
A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A__ ):
snake_case = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A__ ):
snake_case = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
snake_case = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
snake_case = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def UpperCamelCase ( self ) -> Optional[int]:
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(A__ ) / '''preprocessor_config.json'''
snake_case = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
snake_case = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
snake_case = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self ) -> List[Any]:
class _lowercase ( __a ):
_UpperCAmelCase = True
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(A__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
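# A hedged sketch of the registration pattern the tests above exercise: pair a custom
# config with a custom image processor so the Auto classes can resolve it. The class
# bodies are placeholders; a real implementation would override preprocess().
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)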
| 342 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # Rescales pixel values, then pads height/width up to a multiple of `pad_size`.
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int,
            data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # Pad the bottom and right edges so both dimensions become multiples of
        # `size`, using symmetric reflection.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None,
                   pad_size: Optional[int] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
                   **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
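# Worked example of the padding arithmetic in pad(): dimensions are rounded up to the
# next multiple of `size`. Note the formula always adds at least one extra row/column,
# even when the input is already a multiple of `size` (512 -> 520 for size=8), which
# matches the code above exactly.
for old in (509, 512, 515):
    size = 8
    pad_amount = (old // size + 1) * size - old
    print(old, "->", old + pad_amount)  # 509 -> 512, 512 -> 520, 515 -> 520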
| 317 |
"""simple docstring"""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no adjacent, already-colored vertex uses the candidate color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index`, backtracking on failure."""
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step: try each color in turn.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring as a list of color indices, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
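# Usage example: 3-color a 5-vertex graph given as an adjacency matrix.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph, 3))  # [0, 1, 0, 1, 0]; [] would mean no 3-coloring exists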
| 317 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Any , __A : str=13 , __A : str=7 , __A : Union[str, Any]=True , __A : int=True , __A : Optional[int]=False , __A : Tuple=True , __A : Union[str, Any]=99 , __A : List[str]=32 , __A : Tuple=5 , __A : Tuple=4 , __A : Union[str, Any]=37 , __A : int="gelu" , __A : int=0.1 , __A : Dict=0.1 , __A : int=512 , __A : List[str]=16 , __A : Union[str, Any]=2 , __A : Dict=0.0_2 , __A : Dict=3 , __A : str=4 , __A : str=None , ):
__A : List[Any] = parent
__A : Optional[Any] = batch_size
__A : Tuple = seq_length
__A : int = is_training
__A : str = use_input_mask
__A : List[str] = use_token_type_ids
__A : Dict = use_labels
__A : Optional[int] = vocab_size
__A : Dict = hidden_size
__A : List[Any] = num_hidden_layers
__A : Dict = num_attention_heads
__A : Union[str, Any] = intermediate_size
__A : Any = hidden_act
__A : List[Any] = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Tuple = max_position_embeddings
__A : List[str] = type_vocab_size
__A : Dict = type_sequence_label_size
__A : List[Any] = initializer_range
__A : int = num_labels
__A : Dict = num_choices
__A : Union[str, Any] = scope
def lowerCAmelCase_ ( self : List[Any] ):
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : List[Any] = None
if self.use_input_mask:
__A : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__A : Union[str, Any] = None
__A : int = None
__A : List[Any] = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__A : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : List[Any] ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str , __A : Optional[Any] , __A : str , __A : List[Any] , __A : Any , __A : Tuple ):
__A : Any = DistilBertModel(config=__A )
model.to(__A )
model.eval()
__A : Optional[Any] = model(__A , __A )
__A : List[str] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __A : Tuple , __A : Optional[int] , __A : Tuple , __A : List[Any] , __A : Any , __A : List[str] ):
__A : Optional[int] = DistilBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
__A : Optional[int] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict , __A : Any , __A : Any , __A : Optional[int] , __A : Optional[int] , __A : List[Any] ):
__A : List[str] = DistilBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__A : int = model(
__A , attention_mask=__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : int , __A : Optional[Any] , __A : List[Any] , __A : str , __A : Optional[Any] , __A : Optional[int] , __A : List[Any] ):
__A : Dict = self.num_labels
__A : Optional[Any] = DistilBertForSequenceClassification(__A )
model.to(__A )
model.eval()
__A : Optional[Any] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __A : str , __A : Any , __A : Tuple , __A : Tuple , __A : Tuple , __A : Any ):
__A : Any = self.num_labels
__A : Any = DistilBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
__A : Dict = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __A : Optional[Any] , __A : Dict , __A : List[Any] , __A : Optional[int] , __A : Tuple , __A : Any ):
__A : List[Any] = self.num_choices
__A : Any = DistilBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__A : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Optional[Any] = model(
__A , attention_mask=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] ):
__A : Union[str, Any] = self.prepare_config_and_inputs()
((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) : Any = config_and_inputs
__A : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
_lowercase : Union[str, Any] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowercase : List[str] = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Dict = True
_lowercase : List[Any] = True
_lowercase : str = True
_lowercase : Dict = True
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Optional[int] = DistilBertModelTester(self )
__A : Dict = ConfigTester(self , config_class=__A , dim=37 )
def lowerCAmelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__A )
def lowerCAmelCase_ ( self : Dict ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__A )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A )
def lowerCAmelCase_ ( self : str ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__A )
def lowerCAmelCase_ ( self : str ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A )
@slow
def lowerCAmelCase_ ( self : int ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Union[str, Any] = DistilBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def lowerCAmelCase_ ( self : int ):
__A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__A : List[str] = True
__A : List[str] = model_class(config=__A )
__A : Any = self._prepare_for_class(__A , __A )
__A : Tuple = torch.jit.trace(
__A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) )
__A : List[str] = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A )
loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Dict = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__A : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__A : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__A : Tuple = model(__A , attention_mask=__A )[0]
__A : int = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
__A : Optional[int] = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
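# A minimal sketch of the TorchScript round trip the GPU test above exercises: trace
# with example tensors, save, reload, and call. It assumes the model was built with
# `config.torchscript = True` so the forward pass returns tuples; the path and the
# helper name are illustrative, not part of the library.
import torch

def trace_round_trip(model, input_ids, attention_mask, path="traced_model.pt"):
    traced = torch.jit.trace(model, (input_ids, attention_mask))
    torch.jit.save(traced, path)
    reloaded = torch.jit.load(path, map_location="cpu")
    return reloaded(input_ids, attention_mask)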
| 17 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowercase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : int=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=True , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=99 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Union[str, Any]=64 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int=None , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : List[str]=1 , ):
"""simple docstring"""
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = q_groups
__A = k_groups
__A = v_groups
__A = post_attention_groups
__A = intermediate_groups
__A = output_groups
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = SqueezeBertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , UpperCamelCase_ )
__A = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
__A = SqueezeBertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
__A = SqueezeBertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ):
"""simple docstring"""
__A = self.num_labels
__A = SqueezeBertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
"""simple docstring"""
__A = self.num_labels
__A = SqueezeBertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = self.num_choices
__A = SqueezeBertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
__A = self.prepare_config_and_inputs()
((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) = config_and_inputs
__A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
__A = SqueezeBertModelTester(self )
__A = ConfigTester(self , config_class=UpperCamelCase_ , dim=37 )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase_ )
@slow
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = SqueezeBertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
__A = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
__A = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
__A = model(UpperCamelCase_ )[0]
__A = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCamelCase_ )
__A = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-4 ) )
| 637 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-vision-base-ft''': (
        '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = '''data2vec-vision'''

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12,
                 image_size=224, patch_size=16, num_channels=3, use_mask_token=False,
                 use_absolute_position_embeddings=False, use_relative_position_bias=False,
                 use_shared_relative_position_bias=False, layer_scale_init_value=0.1,
                 drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11],
                 pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4,
                 auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
                 semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self):
        return 1E-4
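# Usage sketch for the ONNX export spec above (assumed OnnxConfig constructor usage):
# the config maps the single "pixel_values" input to its dynamic axes and fixes the
# validation tolerance used when comparing ONNX and PyTorch outputs.
config = Data2VecVisionConfig(image_size=224, patch_size=16)
onnx_config = Data2VecVisionOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
print(onnx_config.atol_for_validation)  # 1e-4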
| 96 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __A ( A_ ):
def _snake_case (self ):
lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__magic_name__ , """width_multiplier""" ) )
class __A :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=64 , __magic_name__=2 , __magic_name__=3 , __magic_name__="swish" , __magic_name__=3 , __magic_name__=32 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=10 , __magic_name__=None , __magic_name__=0.25 , __magic_name__=0.0 , __magic_name__=0.0 , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : Union[str, Any] = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Union[str, Any] = conv_kernel_size
lowerCamelCase__ : int = output_stride
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Optional[Any] = scope
lowerCamelCase__ : Tuple = width_multiplier
lowerCamelCase__ : List[Any] = ffn_dropout
lowerCamelCase__ : Union[str, Any] = attn_dropout
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : str = None
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case (self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Any = MobileViTVaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[int] = MobileViTVaForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : Dict = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Dict = self.num_labels
lowerCamelCase__ : Any = MobileViTVaForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : Dict = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case (self ):
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase :Optional[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase :List[Any] = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase :List[Any] = False
UpperCamelCase :int = False
UpperCamelCase :Optional[int] = False
UpperCamelCase :int = False
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = MobileViTVaModelTester(self )
lowerCamelCase__ : Optional[int] = MobileViTVaConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def _snake_case (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def _snake_case (self ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def _snake_case (self ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def _snake_case (self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def _snake_case (self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case (self ):
pass
def _snake_case (self ):
lowerCamelCase__ ,lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = model_class(__magic_name__ )
lowerCamelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _snake_case (self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase__ : str = outputs.hidden_states
lowerCamelCase__ : Dict = 5
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ : Dict = 2
for i in range(len(__magic_name__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ ,lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Tuple = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
@slow
def _snake_case (self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = MobileViTVaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _A () ->List[Any]:
'''simple docstring'''
lowerCamelCase__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def _snake_case (self ):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def _snake_case (self ):
lowerCamelCase__ : str = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__magic_name__ )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : Any = prepare_img()
lowerCamelCase__ : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**__magic_name__ )
# verify the logits
lowerCamelCase__ : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Dict = model.to(__magic_name__ )
lowerCamelCase__ : str = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : str = prepare_img()
lowerCamelCase__ : str = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Dict = model(**__magic_name__ )
lowerCamelCase__ : List[str] = outputs.logits
# verify the logits
lowerCamelCase__ : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __magic_name__ )
lowerCamelCase__ : Any = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def _snake_case (self ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : List[Any] = model.to(__magic_name__ )
lowerCamelCase__ : Optional[int] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**__magic_name__ )
lowerCamelCase__ : str = outputs.logits.detach().cpu()
lowerCamelCase__ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(50, 60)] )
lowerCamelCase__ : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
lowerCamelCase__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
lowerCamelCase__ : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
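# A hedged sketch of the channel-rounding helper imported as `make_divisible` above,
# following the common MobileNet-style rule (round to the nearest multiple of
# `divisor`, never dropping more than 10% of the original value). The upstream
# implementation may differ in details.
from typing import Optional

def make_divisible_sketch(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # ensure rounding down never removes more than 10%
        new_value += divisor
    return new_value

assert make_divisible_sketch(512 * 0.25) == 128  # the tester's default width_multiplier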
| 96 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def UpperCamelCase_ ( __a , __a , __a ) -> Any:
a__ : Union[str, Any] = state_dict.pop(__a )
a__ : List[str] = val
def UpperCamelCase_ ( __a ) -> List[str]:
a__ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
a__ : str = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
a__ : Tuple = value
else:
a__ : List[str] = value
return new_state_dict
def UpperCamelCase_ ( __a , __a=False ) -> Any:
a__ : Tuple = ""
if is_panoptic:
a__ : List[Any] = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a__ : Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a__ : Any = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a__ : Tuple = in_proj_weight[:256, :]
a__ : Any = in_proj_bias[:256]
a__ : Tuple = in_proj_weight[256:512, :]
a__ : List[str] = in_proj_bias[256:512]
a__ : Any = in_proj_weight[-256:, :]
a__ : Optional[int] = in_proj_bias[-256:]
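# Hedged sketch (not part of the original script): PyTorch's MultiheadAttention
# stores query/key/value as one fused (3*d, d) matrix, which is why the slices
# above cut rows [0:256], [256:512] and [512:768] for a hidden size of 256.
# The helper name and d_model default below are illustrative assumptions.
def _split_fused_qkv(in_proj_weight, in_proj_bias, d_model=256):
    """Split a fused in_proj weight/bias into (q, k, v) weight/bias pairs."""
    q = (in_proj_weight[:d_model, :], in_proj_bias[:d_model])
    k = (in_proj_weight[d_model : 2 * d_model, :], in_proj_bias[d_model : 2 * d_model])
    v = (in_proj_weight[-d_model:, :], in_proj_bias[-d_model:])
    return q, k, v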
def UpperCamelCase_ ( ) -> Tuple:
a__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ : List[Any] = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( __a , __a ) -> Dict:
a__ : Tuple = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
a__ : Tuple = "resnet101"
if "dc5" in model_name:
a__ : str = True
a__ : int = "panoptic" in model_name
if is_panoptic:
a__ : List[str] = 250
else:
a__ : str = 91
a__ : Union[str, Any] = "huggingface/label-files"
a__ : int = "coco-detection-id2label.json"
a__ : Optional[Any] = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
a__ : Any = {int(k ): v for k, v in idalabel.items()}
a__ : Union[str, Any] = idalabel
a__ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load image processor
a__ : Dict = "coco_panoptic" if is_panoptic else "coco_detection"
a__ : int = ConditionalDetrImageProcessor(format=__a )
# prepare image
a__ : Any = prepare_img()
a__ : Dict = image_processor(images=__a , return_tensors="pt" )
a__ : List[Any] = encoding["pixel_values"]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
a__ : Optional[Any] = torch.hub.load("DeppMeng/ConditionalDETR" , __a , pretrained=__a ).eval()
a__ : List[str] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
a__ : str = "conditional_detr." + src
rename_key(__a , __a , __a )
a__ : Tuple = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a , is_panoptic=__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a__ : str = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a__ : Any = state_dict.pop(__a )
a__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a__ : Dict = state_dict.pop(__a )
a__ : Tuple = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a__ : Optional[Any] = state_dict.pop(__a )
a__ : Dict = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a__ : str = state_dict.pop(__a )
a__ : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
a__ : str = ConditionalDetrForSegmentation(__a ) if is_panoptic else ConditionalDetrForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
model.push_to_hub(repo_id=__a , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
a__ : Dict = conditional_detr(__a )
a__ : int = model(__a )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
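# Example invocation, matching the argparse flags below (the script filename and
# output path are illustrative placeholders):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50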
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
UpperCamelCase : Optional[int] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 37 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class A__ ( A__ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ):
super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ )
a__ : str = Sql(
cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , )
def _UpperCamelCase( self : Tuple ):
a__ : Optional[Any] = None
a__ : Dict = None
a__ : Union[str, Any] = None
a__ : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , )
# Build dataset for splits
a__ : List[str] = self.builder.as_dataset(
split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
a__ : Any = dataset
a__ : str = name
a__ : Tuple = con
a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a__ : Any = num_proc
a__ : Tuple = to_sql_kwargs
def _UpperCamelCase( self : List[Any] ):
a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ )
a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ )
a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ )
a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs )
return written
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ):
a__, a__, a__ : Union[str, Any] = args
a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
a__ : Tuple = query_table(
table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , )
a__ : str = batch.to_pandas()
a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ )
return num_rows or len(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
a__ : str = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
a__, a__ : List[str] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
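# Minimal usage sketch (table name and connection string are illustrative; in the
# public datasets API this writer backs Dataset.to_sql):
# from datasets import Dataset
# ds = Dataset.from_dict({"col_1": [0, 1], "col_2": ["a", "b"]})
# ds.to_sql("my_table", "sqlite:///demo.db")  # returns the number of rows written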
| 37 | 1 |
"""simple docstring"""
class _A :
"""simple docstring"""
def __init__( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] ) -> Optional[int]:
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ) -> List[str]:
return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def lowercase ( self : List[str] ) -> Optional[int]:
return self.value
def lowercase ( self : Tuple ) -> int:
return self.name
def lowercase ( self : str ) -> Optional[int]:
return self.weight
def lowercase ( self : Dict ) -> Optional[Any]:
return self.value / self.weight
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case):
__snake_case = []
for i in range(len(snake_case)):
menu.append(Things(name[i], value[i], weight[i]))
return menu
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case):
__snake_case = sorted(snake_case, key=snake_case, reverse=snake_case)
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(snake_case)):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i])
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
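# Worked example (numbers are illustrative): items with (value, weight) of
# (10, 5), (9, 4), (3, 6) sort by value/weight density to [(9, 4), (10, 5), (3, 6)];
# with max_cost=15 the greedy pass takes all three for total value 22 at weight 15.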
def SCREAMING_SNAKE_CASE ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def SCREAMING_SNAKE_CASE ( snake_case, snake_case = True, snake_case = math.inf, snake_case = -math.inf, snake_case = math.inf, snake_case = -math.inf, snake_case = False, snake_case = 1_00, snake_case = 0.01, snake_case = 1, ):
__snake_case = False
__snake_case = search_prob
__snake_case = start_temperate
__snake_case = []
__snake_case = 0
__snake_case = None
while not search_end:
__snake_case = current_state.score()
if best_state is None or current_score > best_state.score():
__snake_case = current_state
scores.append(snake_case)
iterations += 1
__snake_case = None
__snake_case = current_state.get_neighbors()
while (
next_state is None and neighbors
): # keep going until we find a neighbor we can move to
__snake_case = random.randint(0, len(snake_case) - 1) # picking a random neighbor
__snake_case = neighbors.pop(snake_case)
__snake_case = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__snake_case = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__snake_case = picked_neighbor
else:
__snake_case = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__snake_case = picked_neighbor
__snake_case = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__snake_case = True
else:
__snake_case = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case), snake_case)
plt.xlabel('''Iterations''')
plt.ylabel('''Function values''')
plt.show()
return best_state
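# Acceptance rule above, worked through: a worsening move with change = -2 at
# temperature 10 survives with probability e^(-2/10) ~ 0.82, so hot iterations
# explore broadly while cold ones (small current_temp) act almost greedily.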
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowercase : Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
__lowercase : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return (3 * x**2) - (6 * y)
__lowercase : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
__lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase : Tuple = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
| 93 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=2 , A_=24 , A_=16 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=None , A_=2 , A_=2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = num_mel_bins
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = frequency_stride
SCREAMING_SNAKE_CASE__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE__ = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE__ = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE__ = num_patches + 2
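# Worked through with the defaults above (num_mel_bins=16, max_length=24,
# patch_size=2, both strides=2): frequency_out = (16-2)//2+1 = 8 and
# time_out = (24-2)//2+1 = 12, giving 96 patches and seq_length = 98.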
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_values, labels
def lowercase_ ( self ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowercase_ ( self , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ASTModel(config=A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Tuple = False
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ASTModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(A_ )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['''input_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = ASTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __snake_case ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = torchaudio.load(lowerCAmelCase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(A_ )
SCREAMING_SNAKE_CASE__ = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_audio()
SCREAMING_SNAKE_CASE__ = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE__ = feature_extractor(A_ , sampling_rate=A_ , return_tensors='''pt''' ).to(A_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**A_ )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , A_ )
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
| 100 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_lowercase = logging.get_logger(__name__)
class __A :
UpperCamelCase :Union[str, Any] = None
@experimental
def _A (UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] ) ->List[str]:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return _map_with_joblib(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def _A (UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : str ) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ : Dict = num_proc if num_proc <= len(UpperCamelCase ) else len(UpperCamelCase )
lowerCamelCase__ : List[Any] = [] # We organize the splits ourselves (contiguous splits)
for index in range(UpperCamelCase ):
lowerCamelCase__ : Optional[Any] = len(UpperCamelCase ) // num_proc
lowerCamelCase__ : Optional[int] = len(UpperCamelCase ) % num_proc
lowerCamelCase__ : List[str] = div * index + min(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : int = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(UpperCamelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"Error dividing inputs iterable among processes. "
f"Total number of objects {len(UpperCamelCase )}, "
f"length: {sum(len(i[1] ) for i in split_kwds )}" )
logger.info(
f"Spawning {num_proc} processes for {len(UpperCamelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}" )
lowerCamelCase__ ,lowerCamelCase__ : List[str] = None, None
if not disable_tqdm:
lowerCamelCase__ ,lowerCamelCase__ : List[Any] = (RLock(),), tqdm.set_lock
with Pool(UpperCamelCase , initargs=UpperCamelCase , initializer=UpperCamelCase ) as pool:
lowerCamelCase__ : List[Any] = pool.map(UpperCamelCase , UpperCamelCase )
logger.info(f"Finished {num_proc} processes" )
lowerCamelCase__ : Any = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"Unpacked {len(UpperCamelCase )} objects" )
return mapped
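# Contiguous-split example: 10 objects over num_proc=3 give div=3, mod=1, so the
# slices are [0:4], [4:7], [7:10]; processes with index < mod take one extra item.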
def _A (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[Any] ) ->List[Any]:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=UpperCamelCase ):
return joblib.Parallel()(
joblib.delayed(UpperCamelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def _A (UpperCamelCase : str ) ->Any:
'''simple docstring'''
lowerCamelCase__ : str = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ : Tuple = None
| 157 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _lowerCAmelCase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Any ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[Any] = k_size // 2
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_SCREAMING_SNAKE_CASE : List[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase__ ) + square(lowerCamelCase__ )) / (2 * square(lowerCamelCase__ )) )
return g
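# Note: the prefactor above is 1/(2*pi*sigma) rather than the textbook 2-D
# Gaussian's 1/(2*pi*sigma**2); for filtering this only rescales the kernel, e.g.
# gen_gaussian_kernel(3, 1) still yields a symmetric 3x3 kernel peaked at center.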
def _lowerCAmelCase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Any, lowerCamelCase__ : int ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = image.shape[0], image.shape[1]
# dst image height and width
_SCREAMING_SNAKE_CASE : Union[str, Any] = height - k_size + 1
_SCREAMING_SNAKE_CASE : Optional[int] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_SCREAMING_SNAKE_CASE : int = zeros((dst_height * dst_width, k_size * k_size) )
_SCREAMING_SNAKE_CASE : Dict = 0
for i, j in product(range(lowerCamelCase__ ), range(lowerCamelCase__ ) ):
_SCREAMING_SNAKE_CASE : int = ravel(image[i : i + k_size, j : j + k_size] )
_SCREAMING_SNAKE_CASE : List[str] = window
row += 1
# turn the kernel into shape(k*k, 1)
_SCREAMING_SNAKE_CASE : Union[str, Any] = gen_gaussian_kernel(lowerCamelCase__, lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : int = ravel(lowerCamelCase__ )
# reshape and get the dst image
_SCREAMING_SNAKE_CASE : Optional[Any] = dot(lowerCamelCase__, lowerCamelCase__ ).reshape(lowerCamelCase__, lowerCamelCase__ ).astype(lowerCamelCase__ )
return dst
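# The im2col trick above flattens every k x k window into one row, so the whole
# convolution collapses to a single (dst_h*dst_w, k*k) @ (k*k,) product instead
# of a per-pixel Python loop.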
if __name__ == "__main__":
# read original image
lowercase_ : List[str] = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
lowercase_ : int = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowercase_ : Optional[Any] = gaussian_filter(gray, 3, sigma=1)
lowercase_ : Any = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 295 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : Any=1_0_2_4 ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = [], []
_SCREAMING_SNAKE_CASE : Union[str, Any] = list(zip(lowerCamelCase__, lowerCamelCase__ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = sorted_examples[0]
def is_too_big(lowerCamelCase__ : List[Any] ):
return tok(lowerCamelCase__, return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_SCREAMING_SNAKE_CASE : int = new_src + " " + src
_SCREAMING_SNAKE_CASE : Union[str, Any] = new_tgt + " " + tgt
if is_too_big(lowerCamelCase__ ) or is_too_big(lowerCamelCase__ ): # cant fit, finalize example
finished_src.append(lowerCamelCase__ )
finished_tgt.append(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = src, tgt
else: # can fit, keep adding
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(lowerCamelCase__ )
finished_tgt.append(lowerCamelCase__ )
return finished_src, finished_tgt
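# Packing sketch (counts are illustrative; concatenated tokenizations are not
# exactly additive): with max_tokens=8 and examples of ~3, ~4 and ~5 tokens, the
# first two merge (~7 <= 8), the third overflows, so 3 rows pack down to 2.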
def _lowerCAmelCase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Path, lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = Path(lowerCamelCase__ )
save_path.mkdir(exist_ok=lowerCamelCase__ )
for split in ["train"]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
_SCREAMING_SNAKE_CASE : Any = [x.rstrip() for x in Path(lowerCamelCase__ ).open().readlines()]
_SCREAMING_SNAKE_CASE : int = [x.rstrip() for x in Path(lowerCamelCase__ ).open().readlines()]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = pack_examples(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
print(f'''packed {split} split from {len(lowerCamelCase__ )} examples -> {len(lowerCamelCase__ )}.''' )
Path(save_path / f'''{split}.source''' ).open("w" ).write("\n".join(lowerCamelCase__ ) )
Path(save_path / f'''{split}.target''' ).open("w" ).write("\n".join(lowerCamelCase__ ) )
for split in ["val", "test"]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
shutil.copyfile(lowerCamelCase__, save_path / f'''{split}.source''' )
shutil.copyfile(lowerCamelCase__, save_path / f'''{split}.target''' )
def _lowerCAmelCase ( ) -> int:
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--tok_name", type=lowerCamelCase__, help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len", type=lowerCamelCase__, default=1_2_8 )
parser.add_argument("--data_dir", type=lowerCamelCase__ )
parser.add_argument("--save_path", type=lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(lowerCamelCase__, Path(args.data_dir ), args.max_seq_len, args.save_path )
if __name__ == "__main__":
packer_cli()
| 295 | 1 |
'''simple docstring'''
from __future__ import annotations
_a : Dict = list[tuple[int, int]]
_a : str = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a : int = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _lowercase :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Node | None , ) -> Optional[int]:
__snake_case = pos_x
__snake_case = pos_y
__snake_case = (pos_y, pos_x)
__snake_case = goal_x
__snake_case = goal_y
__snake_case = g_cost
__snake_case = parent
__snake_case = self.calculate_heuristic()
def a ( self : Tuple ) -> float:
__snake_case = abs(self.pos_x - self.goal_x )
__snake_case = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> bool:
return self.f_cost < other.f_cost
class _lowercase :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : tuple[int, int] , SCREAMING_SNAKE_CASE_ : tuple[int, int] ) -> str:
__snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE_ )
__snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , SCREAMING_SNAKE_CASE_ )
__snake_case = [self.start]
__snake_case = []
__snake_case = False
def a ( self : Optional[int] ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__snake_case = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__snake_case = True
return self.retrace_path(SCREAMING_SNAKE_CASE_ )
self.closed_nodes.append(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_successors(SCREAMING_SNAKE_CASE_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE_ )
else:
# retrieve the best current path
__snake_case = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE_ )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE_ )
if not self.reached:
return [self.start.pos]
return None
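# Because __lt__ ranks nodes by the Manhattan heuristic alone (g_cost is tracked
# but never added in), this is greedy best-first search: fast, but the returned
# path is not guaranteed to be the shortest, unlike A*.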
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Node ) -> list[Node]:
__snake_case = []
for action in delta:
__snake_case = parent.pos_x + action[1]
__snake_case = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE_ , ) )
return successors
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Node | None ) -> Path:
__snake_case = node
__snake_case = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__snake_case = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_a : List[str] = (0, 0)
_a : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
_a : int = GreedyBestFirst(init, goal)
_a : Optional[Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_a : Union[str, Any] = 2
for elem in grid:
print(elem)
| 56 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
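# Note: map() with cache_file_name is used here purely for its side effect of
# materialising the dataset as an Arrow file at the requested path.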
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 1 |
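# The row above builds pytest fixtures that package test data into tar/zip
# archives and plain text files at session scope. A minimal sketch of how such a
# session-scoped `tmp_path_factory` fixture is consumed in a test; the fixture
# name `text_file_path` is hypothetical, not one defined above.
import pytest


@pytest.fixture(scope='session' )
def text_file_path(tmp_path_factory ):
    path = tmp_path_factory.mktemp('data' ) / 'dataset.txt'
    path.write_text('0\n1\n2\n3\n' )
    return str(path )


def test_reads_fixture_file(text_file_path ):
    with open(text_file_path ) as f:
        lines = f.read().splitlines()
    assert lines == ['0', '1', '2', '3']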
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Tuple , *__lowercase : Optional[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : Union[str, Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : int , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[str] , *__lowercase : Optional[int] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : str , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : List[str] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : int , *__lowercase : List[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : int ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : List[Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : Any , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : int , *__lowercase : List[str] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : int , *__lowercase : Tuple , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : Dict , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : str , *__lowercase : Tuple , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : int , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[str] , *__lowercase : List[str] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Any , *__lowercase : Any , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Any , *__lowercase : int , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : Dict , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : Union[str, Any] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : List[str] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : str , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : List[str] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Tuple , *__lowercase : int , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : List[Any] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : Union[str, Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Tuple , *__lowercase : Union[str, Any] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : str , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
| 139 |
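# The block above is the standard dummy-object pattern: placeholder classes that
# raise at construction time when the optional `sentencepiece` backend is missing.
# A simplified, self-contained sketch of the mechanism; `requires_backends_sketch`
# is a stand-in for the real transformers helper, which also formats install hints.
import importlib.util


def requires_backends_sketch(name , backends ):
    # Raise if any optional backend is not importable in this environment.
    missing = [b for b in backends if importlib.util.find_spec(b ) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}" )


class SomeSentencePieceTokenizer:
    _backends = ['sentencepiece']

    def __init__(self , *args , **kwargs ):
        requires_backends_sketch(type(self ).__name__ , self._backends )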
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 139 | 1 |
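# The conversion script above is normally driven by argparse, but the core
# function can also be called directly. A sketch with hypothetical local paths
# (the TF checkpoint and config file must actually exist for this to run):
tf_checkpoint = 'checkpoints/bert_model.ckpt'
bert_config = 'checkpoints/bert_config.json'
dump_path = 'checkpoints/pytorch_model.bin'
convert_tf_checkpoint_to_pytorch(tf_checkpoint , bert_config , dump_path )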
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''align_text_model'''
def __init__( self : Optional[int] , _A : Union[str, Any]=3_0522 , _A : List[str]=768 , _A : List[Any]=12 , _A : int=12 , _A : str=3072 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=0.1 , _A : List[str]=0.1 , _A : List[str]=512 , _A : Union[str, Any]=2 , _A : Optional[int]=0.02 , _A : Optional[int]=1e-12 , _A : List[str]=0 , _A : Optional[Any]="absolute" , _A : Tuple=True , **_A : str , ):
"""simple docstring"""
super().__init__(**_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : int = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
__SCREAMING_SNAKE_CASE : Tuple = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''align_vision_model'''
def __init__( self : Dict , _A : int = 3 , _A : int = 600 , _A : float = 2.0 , _A : float = 3.1 , _A : int = 8 , _A : List[int] = [3, 3, 5, 3, 5, 5, 3] , _A : List[int] = [32, 16, 24, 40, 80, 112, 192] , _A : List[int] = [16, 24, 40, 80, 112, 192, 320] , _A : List[int] = [] , _A : List[int] = [1, 2, 2, 2, 1, 2, 1] , _A : List[int] = [1, 2, 2, 3, 3, 4, 1] , _A : List[int] = [1, 6, 6, 6, 6, 6, 6] , _A : float = 0.25 , _A : str = "swish" , _A : int = 2560 , _A : str = "mean" , _A : float = 0.02 , _A : float = 0.0_01 , _A : float = 0.99 , _A : float = 0.2 , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(**_A )
__SCREAMING_SNAKE_CASE : Optional[int] = num_channels
__SCREAMING_SNAKE_CASE : Tuple = image_size
__SCREAMING_SNAKE_CASE : Any = width_coefficient
__SCREAMING_SNAKE_CASE : Union[str, Any] = depth_coefficient
__SCREAMING_SNAKE_CASE : Tuple = depth_divisor
__SCREAMING_SNAKE_CASE : Any = kernel_sizes
__SCREAMING_SNAKE_CASE : Tuple = in_channels
__SCREAMING_SNAKE_CASE : str = out_channels
__SCREAMING_SNAKE_CASE : str = depthwise_padding
__SCREAMING_SNAKE_CASE : List[Any] = strides
__SCREAMING_SNAKE_CASE : List[str] = num_block_repeats
__SCREAMING_SNAKE_CASE : Union[str, Any] = expand_ratios
__SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio
__SCREAMING_SNAKE_CASE : int = hidden_act
__SCREAMING_SNAKE_CASE : str = hidden_dim
__SCREAMING_SNAKE_CASE : Union[str, Any] = pooling_type
__SCREAMING_SNAKE_CASE : Any = initializer_range
__SCREAMING_SNAKE_CASE : int = batch_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = batch_norm_momentum
__SCREAMING_SNAKE_CASE : Optional[int] = drop_connect_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = sum(_A ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''align'''
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
@classmethod
    def from_text_vision_configs( cls , text_config : AlignTextConfig , vision_config : AlignVisionConfig , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
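# The three configuration classes above compose via `from_text_vision_configs`.
# A sketch of typical composition, assuming the real transformers ALIGN classes
# are installed and importable:
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig(vocab_size=30522 , hidden_size=768 )
vision_config = AlignVisionConfig(image_size=600 )
config = AlignConfig.from_text_vision_configs(text_config , vision_config )
assert config.text_config.hidden_size == 768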
| 74 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList:
    '''simple docstring'''
    def __init__( self , size : int ) -> None:
        self._graph : list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
    @property
    def size( self ) -> int:
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int | None:
        queue = deque([start_vertex] )
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 544 | 0 |
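# The row above is 0-1 BFS: a deque-based shortest-path search for graphs whose
# edge weights are only 0 or 1. Weight-0 edges go to the front of the deque and
# weight-1 edges to the back, so vertices are popped in nondecreasing distance
# order. A small usage example of the classes defined above:
graph = AdjacencyList(3 )
graph.add_edge(0 , 1 , 0 )  # zero-weight edge
graph.add_edge(0 , 2 , 1 )
graph.add_edge(1 , 2 , 1 )
assert graph.get_shortest_path(0 , 2 ) == 1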
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
'''simple docstring'''
lowerCAmelCase_ = None
def lowerCamelCase ( UpperCamelCase : "pyspark.sql.DataFrame" , UpperCamelCase : List[int] , ) -> List[Any]:
import pyspark
def generate_fn():
        df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*' ).where(F"""part_id = {partition_id}""" ).drop('part_id' )
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
    '''simple docstring'''
    def __init__( self , df : "pyspark.sql.DataFrame" , partition_order=None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ) -> Optional[Any]:
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator : np.random.Generator ) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id : int , num_workers : int ) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
@property
    def n_shards( self ) -> int:
return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self : Union[str, Any] , snake_case__ : "pyspark.sql.DataFrame" , snake_case__ : str = None , snake_case__ : str = None , **snake_case__ : Union[str, Any] , ) -> str:
import pyspark
_lowerCamelCase = pyspark.sql.SparkSession.builder.getOrCreate()
_lowerCamelCase = df
_lowerCamelCase = working_dir
super().__init__(
cache_dir=snake_case__ , config_name=str(self.df.semanticHash() ) , **snake_case__ , )
    def _validate_cache_dir( self ):
# Returns the path of the created file.
        def create_cache_and_write_probe(_ ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(snake_case__ , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info( self ):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size : int ):
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
def write_arrow(snake_case__ : Any ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowerCamelCase = pyspark.TaskContext().taskAttemptId()
_lowerCamelCase = next(snake_case__ , snake_case__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
_lowerCamelCase = 0
_lowerCamelCase = writer_class(
features=snake_case__ , path=working_fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , writer_batch_size=snake_case__ , storage_options=snake_case__ , embed_local_files=snake_case__ , )
_lowerCamelCase = pa.Table.from_batches([first_batch] )
writer.write_table(snake_case__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowerCamelCase , _lowerCamelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
_lowerCamelCase = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , writer_batch_size=snake_case__ , storage_options=snake_case__ , embed_local_files=snake_case__ , )
_lowerCamelCase = pa.Table.from_batches([batch] )
writer.write_table(snake_case__ )
if writer._num_bytes > 0:
_lowerCamelCase , _lowerCamelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(snake_case__ ) ):
_lowerCamelCase = os.path.join(os.path.dirname(snake_case__ ) , os.path.basename(snake_case__ ) )
shutil.move(snake_case__ , snake_case__ )
_lowerCamelCase = (
self.df.mapInArrow(snake_case__ , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _snake_case ( self : Any , snake_case__ : "datasets.SplitGenerator" , snake_case__ : str = "arrow" , snake_case__ : Optional[Union[str, int]] = None , snake_case__ : Optional[int] = None , **snake_case__ : Optional[int] , ) -> Dict:
self._validate_cache_dir()
_lowerCamelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case__ )
_lowerCamelCase = not is_remote_filesystem(self._fs )
_lowerCamelCase = os.path.join if is_local else posixpath.join
_lowerCamelCase = '-TTTTT-SSSSS-of-NNNNN'
_lowerCamelCase = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowerCamelCase = path_join(self._output_dir , snake_case__ )
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = []
_lowerCamelCase = []
for task_id, content in self._prepare_split_single(snake_case__ , snake_case__ , snake_case__ ):
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(snake_case__ )
_lowerCamelCase = total_num_examples
_lowerCamelCase = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowerCamelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowerCamelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
snake_case__ : int , snake_case__ : int , snake_case__ : int , ):
rename(
snake_case__ , fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , f"""{global_shard_id:05d}""" ).replace('NNNNN' , f"""{total_shards:05d}""" ) , )
_lowerCamelCase = []
_lowerCamelCase = 0
for i in range(len(snake_case__ ) ):
_lowerCamelCase , _lowerCamelCase = task_id_and_num_shards[i]
for shard_id in range(snake_case__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(snake_case__ , len(snake_case__ ) ).map(lambda snake_case__ : _rename_shard(*snake_case__ ) ).collect()
else:
# don't use any pattern
_lowerCamelCase = 0
_lowerCamelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , fpath.replace(snake_case__ , '' ) , )
def _snake_case ( self : List[Any] , snake_case__ : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df )
| 712 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __lt__( self : Optional[Any] , snake_case__ : Optional[int] ) -> Dict:
return self[-1] < other[-1]
def __eq__( self : List[str] , snake_case__ : Tuple ) -> Dict:
return self[-1] == other[-1]
def lowerCamelCase ( UpperCamelCase : list ) -> list:
_lowerCamelCase = []
# sort into stacks
for element in collection:
_lowerCamelCase = Stack([element] )
_lowerCamelCase = bisect_left(UpperCamelCase , UpperCamelCase )
if i != len(UpperCamelCase ):
stacks[i].append(UpperCamelCase )
else:
stacks.append(UpperCamelCase )
# use a heap-based merge to merge stack efficiently
_lowerCamelCase = merge(*(reversed(UpperCamelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
| 234 | 0 |
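# Patience sort, as above: deal each element onto the leftmost pile whose top is
# at least as large, then heap-merge the reversed (ascending) piles. A quick
# property check of the function defined above against Python's built-in sort:
import random

data = [random.randrange(100 ) for _ in range(50 )]
assert patience_sort(list(data ) ) == sorted(data )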
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCamelCase : Tuple = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 328 |
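# The row above uses transformers' lazy-module pattern: `_import_structure` maps
# submodule names to exported symbols, and the heavy imports only run when a
# symbol is first touched. A simplified, self-contained sketch of the idea:
import importlib
import types


class LazyModuleSketch(types.ModuleType ):
    # Minimal stand-in for transformers' _LazyModule: resolve attributes to
    # their submodules on first access instead of at import time.
    def __init__(self , name , import_structure ):
        super().__init__(name )
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self , attr ):
        submodule = self._symbol_to_submodule.get(attr )
        if submodule is None:
            raise AttributeError(attr )
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}" ) , attr )
        setattr(self , attr , value )  # cache for subsequent lookups
        return value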
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name , num_meta4D_last_stage ):
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , param = old_name.split('.' )
        if layer == "0":
            new_name = old_name.replace('0' , 'convolution1' )
        elif layer == "1":
            new_name = old_name.replace('1' , 'batchnorm_before' )
        elif layer == "3":
            new_name = old_name.replace('3' , 'convolution2' )
        else:
            new_name = old_name.replace('4' , 'batchnorm_after' )
    if "network" in old_name and re.search(R'\d\.\d' , old_name ):
        two_digit_num = R'\b\d{2}\b'
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R'\d\.\d\d.' , old_name ).group()
        else:
            match = re.search(R'\d\.\d.' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '' )
            trimmed_name = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1' , 'layernorm1' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2' , 'layernorm2' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1' , 'linear_in' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2' , 'linear_out' )
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(R'.\d.' , old_name ):
        new_name = old_name.replace('network' , 'intermediate_stages' )
    if "fc" in new_name:
        new_name = new_name.replace('fc' , 'convolution' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1' , 'batchnorm_before' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2' , 'batchnorm_after' )
    if "proj" in new_name:
        new_name = new_name.replace('proj' , 'projection' )
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head' , 'distillation_classifier' )
    elif "head" in new_name:
        new_name = new_name.replace('head' , 'classifier' )
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm' , 'layernorm' )
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
def convert_torch_checkpoint( checkpoint , num_meta4D_last_stage ):
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )

    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings['bicubic'] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )

    assert torch.allclose(original_pixel_values , pixel_values )

    outputs = model(pixel_values )
    logits = outputs.logits

    expected_shape = (1, 1000)
if "l1" in model_name:
snake_case__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _A , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _A , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(pytorch_dump_path )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )

    if push_to_hub:
        print('Pushing model to the hub...' )

        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
__UpperCamelCase : Dict = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 328 | 1 |
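# The conversion above is mostly mechanical key renaming: each parameter name in
# the original checkpoint maps to a transformers name, and the state dict is
# rebuilt around the new keys. A toy sketch of the same rename-and-rebuild
# pattern (the rules here are illustrative, not EfficientFormer's real ones):
import re


def toy_rename(old_name ):
    new_name = old_name.replace('patch_embed.0' , 'patch_embed.convolution1' )
    return re.sub(R'\bnorm1\b' , 'layernorm1' , new_name )


state_dict = {'patch_embed.0.weight': 1, 'blocks.0.norm1.bias': 2}
converted = {toy_rename(k ): v for k, v in state_dict.items()}
assert 'patch_embed.convolution1.weight' in converted
assert 'blocks.0.layernorm1.bias' in converted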
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
a = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    """simple docstring"""
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments( TrainingArguments ):
    mp_parameters : str = field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )

    def __post_init__( self ):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.' , FutureWarning , )
@cached_property
def UpperCamelCase ( self : int ) -> "torch.device":
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
        if self.no_cuda:
            device = torch.device('cpu' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401

            torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device )

        return device
@property
    def world_size( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device( self ):
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation( self ):
return False
| 347 |
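# Device selection above walks a ladder: CPU when CUDA is disabled, the SageMaker
# model-parallel or data-parallel local rank when either is active, otherwise
# plain (possibly distributed) CUDA. A condensed, self-contained sketch of the
# ladder's shape (the SageMaker-specific branches are omitted):
import torch


def pick_device_sketch(no_cuda , local_rank ):
    if no_cuda:
        return torch.device('cpu' )
    if local_rank != -1 and torch.cuda.is_available():
        return torch.device('cuda' , local_rank )
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )


assert pick_device_sketch(no_cuda=True , local_rank=-1 ).type == 'cpu'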
'''simple docstring'''
import numpy as np
def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
"""simple docstring"""
snake_case: int =int(np.ceil((x_end - xa) / h ) )
snake_case: Optional[int] =np.zeros((n + 1,) )
snake_case: Optional[int] =ya
snake_case: List[str] =xa
for k in range(__UpperCAmelCase ):
snake_case: Optional[int] =f(__UpperCAmelCase , y[k] )
snake_case: Optional[Any] =f(x + 0.5 * h , y[k] + 0.5 * h * ka )
snake_case: Optional[Any] =f(x + 0.5 * h , y[k] + 0.5 * h * ka )
snake_case: Optional[Any] =f(x + h , y[k] + h * ka )
snake_case: List[Any] =y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
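# The row above is the classical fourth-order Runge-Kutta method, though the
# variable mangling has collapsed the four distinct slopes into one name. A
# corrected, self-contained version, checked against y' = y with exact solution
# e^x (global error is O(h^4), so h = 0.01 over [0, 1] lands well inside 1e-6):
import math

import numpy as np


def runge_kutta4(f , y0 , x0 , x_end , h ):
    n = int(np.ceil((x_end - x0 ) / h ) )
    y = np.zeros(n + 1 )
    y[0] = y0
    x = x0
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6 ) * h * (ka + 2 * kb + 2 * kc + kd )
        x += h
    return y


ys = runge_kutta4(lambda x , y: y , 1.0 , 0.0 , 1.0 , 0.01 )
assert abs(ys[-1] - math.e ) < 1e-6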
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
def lowerCamelCase__ (self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ = DPTImageProcessingTester(self )
@property
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """size""" ) )
def lowerCamelCase__ (self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowercase__ = image_processing(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCamelCase__ (self : Any ) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowercase__ = image_processing(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowercase__ = image_processing(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 15 |
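# The tests above all assert one contract: whatever the input type (PIL, NumPy,
# or torch), the processor returns pixel values shaped (batch, channels, height,
# width). A sketch of that contract, assuming transformers with vision extras
# and Pillow are installed:
import numpy as np
from PIL import Image

from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={'height': 18, 'width': 18} )
image = Image.fromarray(np.zeros((24, 24, 3 ) , dtype=np.uint8 ) )
outputs = processor(images=image , return_tensors='np' )
assert outputs.pixel_values.shape == (1, 3, 18, 18 )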
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , lowerCamelCase , lowerCamelCase = 13 , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 1_28 , lowerCamelCase=[16, 32, 64, 1_28] , lowerCamelCase = 7 , lowerCamelCase = 4 , lowerCamelCase = 37 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 10 , lowerCamelCase = 0.0_2 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 1_28 , lowerCamelCase = [2, 2, 2, 2] , lowerCamelCase = 2 , lowerCamelCase = 2 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = encoder_stride
snake_case__ = num_attention_outputs
snake_case__ = embed_dim
snake_case__ = embed_dim + 1
snake_case__ = resolution
snake_case__ = depths
snake_case__ = hidden_sizes
snake_case__ = dim
snake_case__ = mlp_expansion_ratio
def A_ ( self ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = TFEfficientFormerModel(config=lowerCamelCase )
snake_case__ = model(lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = self.type_sequence_label_size
snake_case__ = TFEfficientFormerForImageClassification(lowerCamelCase )
snake_case__ = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = TFEfficientFormerForImageClassification(lowerCamelCase )
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_A : Optional[Any] = False
_A : List[Any] = False
_A : Tuple = False
_A : List[Any] = False
_A : Any = False
def A_ ( self ):
snake_case__ = TFEfficientFormerModelTester(self )
snake_case__ = ConfigTester(
self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def A_ ( self ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def A_ ( self ):
pass
def A_ ( self ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(lowerCamelCase )
snake_case__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def A_ ( self ):
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
snake_case__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
snake_case__ = seq_length * self.model_tester.chunk_length
else:
snake_case__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
snake_case__ = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
snake_case__ = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def A_ ( self ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = TFEfficientFormerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def A_ ( self ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = True
snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "key_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "chunk_length" , lowerCamelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
snake_case__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case__ = True
snake_case__ = False
snake_case__ = True
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ = True
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def A_ ( self ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case__ = model_class(lowerCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
snake_case__ = model(lowerCamelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def A_ ( self ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def A_ ( self ):
snake_case__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
snake_case__ = model(**lowerCamelCase , training=lowerCamelCase )
# verify the logits
snake_case__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def A_ ( self ):
snake_case__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
snake_case__ = model(**lowerCamelCase , training=lowerCamelCase )
# verify the logits
snake_case__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
| 276 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int ) -> None:
    '''simple docstring'''
    random.seed(seed )
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    def __init__( self , parameters , decay = 0.9999 , min_decay = 0.0 , update_after_step = 0 , use_ema_warmup = False , inv_gamma = 1.0 , power = 2 / 3 , model_cls = None , model_config = None , **kwargs , ) -> None:
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get('max_value' , None ) is not None:
            deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.'
            deprecate('max_value' , '1.0.0' , deprecation_message , standard_warn=False )
            decay = kwargs['max_value']
        if kwargs.get('min_value' , None ) is not None:
            deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
            deprecate('min_value' , '1.0.0' , deprecation_message , standard_warn=False )
            min_decay = kwargs['min_value']
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get('device' , None ) is not None:
            deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.'
            deprecate('device' , '1.0.0' , deprecation_message , standard_warn=False )
            self.to(device=kwargs['device'] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained( cls , path , model_cls ) -> "EMAModel":
        _ , ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self , path ) -> None:
        if self.model_cls is None:
            raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
        if self.model_config is None:
            raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop('shadow_params' , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self , optimization_step ) -> float:
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
@torch.no_grad()
    def step( self , parameters ) -> None:
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self , parameters ) -> None:
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self , device=None , dtype=None ) -> None:
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store( self , parameters ) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self , parameters ) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self , state_dict ) -> None:
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get('decay' , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('Decay must be between 0 and 1' )
        self.min_decay = state_dict.get('min_decay' , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError('Invalid min_decay' )
        self.optimization_step = state_dict.get('optimization_step' , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError('Invalid optimization_step' )
        self.update_after_step = state_dict.get('update_after_step' , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError('Invalid update_after_step' )
        self.use_ema_warmup = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError('Invalid use_ema_warmup' )
        self.inv_gamma = state_dict.get('inv_gamma' , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError('Invalid inv_gamma' )
        self.power = state_dict.get('power' , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError('Invalid power' )
        shadow_params = state_dict.get('shadow_params' , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError('shadow_params must be a list' )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError('shadow_params must all be Tensors' )
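# Hedged usage sketch (added; not part of the original file). `model`,
# `optimizer`, `train_dataloader` and `evaluate` are hypothetical placeholders:
#
#   ema = EMAModel(model.parameters(), decay=0.9999, use_ema_warmup=True)
#   for batch in train_dataloader:
#       loss = model(**batch).loss
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       ema.step(model.parameters())   # update the shadow weights
#   ema.store(model.parameters())      # stash the raw weights
#   ema.copy_to(model.parameters())    # evaluate with the averaged weights
#   evaluate(model)
#   ema.restore(model.parameters())    # put the raw weights back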
| 707 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self ) -> List[Any]:
debug_launcher(test_script.main )
def lowerCAmelCase ( self ) -> Optional[Any]:
debug_launcher(test_ops.main )
| 541 | 0 |
"""simple docstring"""
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase_ = os.path.join(git_repo_path, "src", "transformers")
lowercase_ = '\n{0} = None\n'
lowercase_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
lowercase_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def a ( self : Any )-> Tuple:
"""simple docstring"""
        no_processing = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
        self.assertIsNone(no_processing )
        simple_backend = find_backend(""" if not is_tokenizers_available():""" )
        self.assertEqual(simple_backend , """tokenizers""" )
        backend_with_underscore = find_backend(""" if not is_tensorflow_text_available():""" )
        self.assertEqual(backend_with_underscore , """tensorflow_text""" )
        double_backend = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
        self.assertEqual(double_backend , """sentencepiece_and_tokenizers""" )
        double_backend_with_underscore = find_backend(
            """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
        self.assertEqual(double_backend_with_underscore , """sentencepiece_and_tensorflow_text""" )
        triple_backend = find_backend(
            """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
        self.assertEqual(triple_backend , """sentencepiece_and_tokenizers_and_vision""" )
def a ( self : List[Any] )-> Any:
"""simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""" , objects )
        self.assertIn("""tensorflow_text""" , objects )
        self.assertIn("""sentencepiece_and_tokenizers""" , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def a ( self : List[str] )-> Tuple:
"""simple docstring"""
        dummy_constant = create_dummy_object("""CONSTANT""" , """'torch'""" )
        self.assertEqual(dummy_constant , """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""" , """'torch'""" )
        self.assertEqual(
            dummy_function , """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"""
        dummy_class = create_dummy_object("""FakeClass""" , """'torch'""" )
        self.assertEqual(dummy_class , expected_dummy_class )
def a ( self : Dict )-> Dict:
"""simple docstring"""
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""] , expected_dummy_pytorch_file )
| 470 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 692 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )
def _UpperCamelCase ( self : str , a_ : Tuple ):
"""simple docstring"""
return "lower newer", "lower newer"
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def _UpperCamelCase ( self : Tuple , max_length : int=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="""max_length""" , )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy( OpenAIGPTTokenizationTest ):
    """simple docstring"""
    pass
| 712 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester( ConfigTester ):
"""simple docstring"""
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a_ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(a_ , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(a_ , """num_attention_heads""" ) )
class MobileViTModelTester:
"""simple docstring"""
def __init__( self : Optional[int] , a_ : Dict , a_ : Tuple=13 , a_ : Any=32 , a_ : Optional[int]=2 , a_ : Optional[int]=3 , a_ : List[Any]=6_40 , a_ : Optional[int]=4 , a_ : Dict="silu" , a_ : List[Any]=3 , a_ : Union[str, Any]=32 , a_ : Optional[int]=0.1 , a_ : Any=0.1 , a_ : List[str]=0.1 , a_ : str=0.0_2 , a_ : str=True , a_ : Optional[int]=True , a_ : List[Any]=10 , a_ : Tuple=None , ):
"""simple docstring"""
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = last_hidden_size
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = conv_kernel_size
lowerCamelCase__ = output_stride
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = classifier_dropout_prob
lowerCamelCase__ = use_labels
lowerCamelCase__ = is_training
lowerCamelCase__ = num_labels
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Tuple , a_ : Dict , a_ : Dict , a_ : Dict , a_ : str ):
"""simple docstring"""
lowerCamelCase__ = MobileViTModel(config=a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _UpperCamelCase ( self : str , a_ : Tuple , a_ : int , a_ : str , a_ : Tuple ):
"""simple docstring"""
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MobileViTForImageClassification(a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Optional[Any] , a_ : List[Any] , a_ : str , a_ : List[str] , a_ : int ):
"""simple docstring"""
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MobileViTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(a_ )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
def check_hidden_states_output(a_ : Any , a_ : List[str] , a_ : str ):
lowerCamelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(a_ , a_ ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = 5
self.assertEqual(len(a_ ) , a_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ = 2
for i in range(len(a_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(a_ , a_ , a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@slow
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = MobileViTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
'''simple docstring'''
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(a_ )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
# verify the logits
lowerCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
lowerCamelCase__ = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = model.to(a_ )
lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
lowerCamelCase__ = outputs.logits
# verify the logits
lowerCamelCase__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , a_ )
lowerCamelCase__ = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=a_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = model.to(a_ )
lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
lowerCamelCase__ = outputs.logits.detach().cpu()
lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(50, 60)] )
lowerCamelCase__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , a_ )
lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=a_ )
lowerCamelCase__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , a_ )
| 235 | 0 |
"""simple docstring"""
def set_bit(number: int , position: int ) -> int:
    """Set (turn on) the bit at `position`: set_bit(0b1101, 1) == 0b1111 == 15."""
    return number | (1 << position)
def clear_bit(number: int , position: int ) -> int:
    """Clear (turn off) the bit at `position`: clear_bit(0b1111, 1) == 0b1101 == 13."""
    return number & ~(1 << position)
def flip_bit(number: int , position: int ) -> int:
    """Toggle the bit at `position`: flip_bit(0b1101, 1) == 0b1111 == 15."""
    return number ^ (1 << position)
def is_bit_set(number: int , position: int ) -> bool:
    """Return True iff the bit at `position` is 1: is_bit_set(0b1010, 3) is True."""
    return ((number >> position) & 1) == 1
def get_bit(number: int , position: int ) -> int:
    """Return the bit value (0 or 1) at `position`: get_bit(0b1010, 0) == 0."""
    return int((number & (1 << position)) != 0 )
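# Worked example (added): with number = 0b1010 (decimal 10),
#   set_bit(0b1010, 0)    -> 0b1011 (11): OR with 1 << 0 turns bit 0 on
#   clear_bit(0b1010, 1)  -> 0b1000 (8): AND with ~(1 << 1) turns bit 1 off
#   flip_bit(0b1010, 1)   -> 0b1000 (8): XOR with 1 << 1 toggles bit 1
#   is_bit_set(0b1010, 3) -> True, since bit 3 is the leading 1
#   get_bit(0b1010, 0)    -> 0, since the least-significant bit is off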
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def find_backend(line: str ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name: str , backend_name: str ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files(backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path , F'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    '''to fix this.''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCamelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
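# Illustrative output (added; the object name is hypothetical): for a
# torch-backed object, the generated dummy module renders DUMMY_CLASS with
# the backend list ["torch"], e.g.
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])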
| 381 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
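# Worked example (added): for a single sequence A the methods above produce
# `[CLS] A [SEP]` with token_type_ids of all zeros; for a pair (A, B) they
# produce `[CLS] A [SEP] B [SEP]`, where the `[CLS] A [SEP]` span gets type
# id 0 and the `B [SEP]` span gets type id 1.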
| 712 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def cosine_distance(image_embeds , text_embeds ):
    """simple docstring"""
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
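# Note (added): both inputs are L2-normalized row-wise, so each entry of the
# returned matrix is the cosine similarity between one image embedding and one
# concept embedding, bounded in [-1, 1] (1.0 for identical directions, 0.0
# for orthogonal embeddings).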
class StableDiffusionSafetyChecker( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )
@torch.no_grad()
    def forward( self , clip_input , images ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res["""bad_concepts"""] ) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx( self , clip_input , images ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
| 202 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str ) -> dict[Any, Any]:
    headers = {
        '''Authorization''': F'''token {auth_token}''',
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
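# Usage note (added; the script file name is hypothetical): export a personal
# access token first, e.g. `USER_TOKEN=ghp_xxx python fetch_github_info.py`.
# The /user endpoint returns the authenticated profile as JSON; fields such
# as "login" and "id" are part of GitHub's documented response.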
| 184 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_lowerCamelCase : Dict = logging.get_logger(__name__)
class MobileViTImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = 1 / 2_5_5 , lowercase__ = True , lowercase__ = None , lowercase__ = True , **lowercase__ , ):
'''simple docstring'''
super().__init__(**lowercase__ )
__A =size if size is not None else {'''shortest_edge''': 2_2_4}
__A =get_size_dict(lowercase__ , default_to_square=lowercase__ )
__A =crop_size if crop_size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
__A =get_size_dict(lowercase__ , param_name='''crop_size''' )
__A =do_resize
__A =size
__A =resample
__A =do_rescale
__A =rescale_factor
__A =do_center_crop
__A =crop_size
__A =do_flip_channel_order
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = PIL.Image.BILINEAR , lowercase__ = None , **lowercase__ , ):
'''simple docstring'''
__A =get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
__A =get_resize_output_image_size(lowercase__ , size=size['''shortest_edge'''] , default_to_square=lowercase__ )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
'''simple docstring'''
__A =get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(lowercase__ , size=(size['''height'''], size['''width''']) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
'''simple docstring'''
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase ( self , lowercase__ , lowercase__ = None ):
'''simple docstring'''
return flip_channel_order(lowercase__ , data_format=lowercase__ )
def __UpperCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
'''simple docstring'''
__A =do_resize if do_resize is not None else self.do_resize
__A =resample if resample is not None else self.resample
__A =do_rescale if do_rescale is not None else self.do_rescale
__A =rescale_factor if rescale_factor is not None else self.rescale_factor
__A =do_center_crop if do_center_crop is not None else self.do_center_crop
__A =(
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__A =size if size is not None else self.size
__A =get_size_dict(lowercase__ , default_to_square=lowercase__ )
__A =crop_size if crop_size is not None else self.crop_size
__A =get_size_dict(lowercase__ , param_name='''crop_size''' )
__A =make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
__A =[to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__A =[self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
__A =[self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
__A =[self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__A =[self.flip_channel_order(image=lowercase__ ) for image in images]
__A =[to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__A ={'''pixel_values''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
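# Hedged usage sketch (added): `segmentation_model` is a placeholder for any
# compatible MobileViT segmentation model.
#
#   from PIL import Image
#   processor = MobileViTImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   outputs = segmentation_model(**inputs)
#   maps = processor.post_process_semantic_segmentation(outputs)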
| 184 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    '''Greedily concatenate consecutive (source, target) pairs while both sides fit under the token budget.'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
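# Illustrative call (assumes a seq2seq tokenizer is already loaded; variable names are ours):
#     packed_src, packed_tgt = pack_examples(tokenizer, src_lines, tgt_lines, max_tokens=1024)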
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    '''Pack the train split under `max_tokens` and copy the val/test splits unchanged.'''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open("w").write("\n".join(packed_src))
        Path(save_path / f"""{split}.target""").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")
def packer_cli():
    '''Command-line entry point: parse args, load the tokenizer, and pack the dataset directory.'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli() | 26 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    '''True when naively "cancelling" the shared digit still yields an equal fraction.'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    '''Collect all non-trivial two-digit digit-cancelling fractions.'''
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"""{num}/{den}""")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n_digits: int = 2) -> int:
    '''Return the denominator of the product of all such fractions, in lowest terms.'''
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
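# For the default two-digit case the four fractions are 16/64, 19/95, 26/65 and 49/98;
# their product is 1/100, so solution() returns 100.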
if __name__ == "__main__":
print(solution()) | 26 | 1 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    # Build a k_size x k_size Gaussian kernel centred on the middle pixel.
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
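# Note: the im2col buffer holds one row per output pixel, so memory grows as
# O(dst_height * dst_width * k_size**2) - cheap for the 3x3 and 5x5 kernels used below.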
if __name__ == "__main__":
# read original image
    img = imread(r'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
waitKey() | 149 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = '''▁'''
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    '''Fast (tokenizers-backed) XLNet tokenizer.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''XLNet format: `A <sep> <cls>` or `A <sep> B <sep> <cls>` - special tokens go at the end.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
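    # Unlike BERT-style models, the <cls> token sits at the END of the sequence and gets its
    # own segment id (2), which is also why padding_side is '''left''' for XLNet.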
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,) | 149 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path: str, question_encoder_name_or_path: str, dest_dir: Path, config_name_or_path: str = None, generator_tokenizer_name_or_path: str = None, question_encoder_tokenizer_name_or_path: str = None, ):
    """Assemble a RAG checkpoint from a question encoder and a generator and save it to `dest_dir`."""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
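# The saved directory can be reloaded directly, e.g. RagTokenForGeneration.from_pretrained(dest_dir),
# which is exactly what the sanity check above exercises.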
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''Fast CPU tests for the text-to-video pipeline, built from tiny randomly-initialised components.'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
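    # These deliberately tiny configs (32-dim hidden sizes, a handful of layers) keep every
    # forward pass cheap enough for the whole fast suite to run on CPU.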
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    '''Nightly GPU tests against the full damo-vilab/text-to-video-ms-1.7b checkpoint.'''
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 437 | 0 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall('^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall('^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall('^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f'{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`')
    return get_relative_imports(filename)
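# e.g. check_imports("my_pipeline.py") (a hypothetical file) raises if the file imports a package
# that is not installed, and otherwise returns its relative imports for further resolution.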
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.')
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ):
    '''Download (or locate) `module_file` and copy it into the dynamic module cache.'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f'Defaulting to latest_version: {revision}.')
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f' {", ".join(available_versions + ["main"] )}.')
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
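# The returned path is relative to HF_MODULES_CACHE, which is why the wrapper below can hand
# it straight to get_class_in_module after stripping the ".py" suffix.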
def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', '')) | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Load a TensorFlow RemBERT checkpoint into a PyTorch model and save the state dict."""
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 448 | 0 |
import math
def is_prime(number: int) -> bool:
    '''Deterministic trial-division primality test for non-negative integers.'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    '''Return the next prime at or above `factor * value` (strictly above if the start is already prime).'''
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
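# Examples: next_prime(14) == 17, and next_prime(13) == 17 as well, because a start value
# that is already prime triggers the recursive call with value + 1.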
| 702 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    '''Wraps a CLIP image processor and a CLIP tokenizer into a single processor.'''

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''CLIPImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''Forward everything to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''Forward everything to the tokenizer's decode.'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
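# Typical call (illustrative): processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# returns input_ids/attention_mask plus pixel_values in a single BatchEncoding.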
| 582 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''Solve the linear system `matrix @ x = vector` by Gaussian elimination with partial pivoting.'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    '''Fit the unique polynomial of degree len(y_list) - 1 through the points (1, y_1), ..., (n, y_n).'''
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))
    return interpolated_func
def question_function(variable: int) -> int:
    '''The generating function u(n) from Project Euler problem 101.'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''Sum the first incorrect term of each optimal polynomial approximation.'''
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
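# For the default question_function the summed first-incorrect-terms give 37076114526,
# the published answer to Project Euler problem 101.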
if __name__ == "__main__":
print(f'{solution() = }')
| 650 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ):
    '''Register a Formatter subclass under a format type name and its aliases.'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    '''Remember why a format type cannot be used so a helpful error is raised on request.'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    '''Resolve an alias like "pt" or "np" to its canonical format type.'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
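# e.g. when the torch formatter is registered above, get_format_type_from_alias("pt") -> "torch",
# while an unknown name such as "torch" itself passes through unchanged.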
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    '''Instantiate the Formatter registered for `format_type`, raising a helpful error if unavailable.'''
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 311 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    """Build a YolosConfig matching the named checkpoint variant."""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace('''backbone''', '''vit''')
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "det_token" in name:
        name = name.replace('''det_token''', '''embeddings.detection_tokens''')
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''', '''encoder.mid_position_embeddings''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "class_embed" in name:
        name = name.replace('''class_embed''', '''class_labels_classifier''')
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''', '''bbox_predictor''')
    if "vit.norm" in name:
        name = name.replace('''vit.norm''', '''vit.layernorm''')
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
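# This is the standard COCO val2017 "two cats on a couch" image used throughout the
# transformers conversion scripts as a quick visual sanity check.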
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original YOLOS weights into the HF YOLOS structure and verify outputs."""
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"""Unknown yolos_name: {yolos_name}""")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='''hustvl''')
        model.push_to_hub(model_name, organization='''hustvl''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',"""
""" \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 708 |
'''simple docstring'''
def solution(numerator=3, denominator=7, limit=1000000):
    """Find the numerator of the reduced proper fraction immediately to the left of
    numerator/denominator among fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
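# With the defaults this finds 428570/999997, the closest fraction below 3/7, so
# solution() returns 428570 (the Project Euler 71 answer).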
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 418 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_owlvit'''] = ['''OwlViTFeatureExtractor''']
    _import_structure['''image_processing_owlvit'''] = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_owlvit'''] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 238 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = """data2vec-vision"""

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
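# Minimal usage sketch (added for illustration, not in the original module):
# build a config with the defaults above and inspect the ONNX export contract.
# Assumes the transformers base classes imported at the top are available.
if __name__ == "__main__":
    cfg = Data2VecVisionConfig()  # every hyperparameter falls back to its default
    print(cfg.hidden_size, cfg.num_hidden_layers)  # 768 12
    onnx_cfg = Data2VecVisionOnnxConfig(cfg)
    print(dict(onnx_cfg.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
    print(onnx_cfg.atol_for_validation)  # 1e-4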
| 238 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # free GPU memory between the heavy integration tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        # round-trip the pipeline through save_pretrained / from_pretrained and
        # check that the reloaded model is deterministic for the same seed
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
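# Runner sketch (added for illustration): these tests are gated by @nightly and
# @require_torch_gpu, so they only execute when a CUDA device is present and the
# nightly flag is set (in diffusers this is typically the RUN_NIGHTLY environment
# variable -- an assumption; check the testing_utils of your installed version).
if __name__ == "__main__":
    unittest.main()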
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra "choice" axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
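# Minimal usage sketch (added for illustration, not in the original module):
# build a config and inspect the dynamic axes the ONNX exporter would receive.
# Assumes the transformers base classes imported at the top are available.
if __name__ == "__main__":
    config = ConvBertConfig()  # all hyperparameters fall back to the defaults above
    print(config.conv_kernel_size, config.head_ratio)  # 9 2
    onnx_config = ConvBertOnnxConfig(config, task="multiple-choice")
    print(dict(onnx_config.inputs))  # each input gets batch/choice/sequence axes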
| 14 | 0 |