| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
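The table above describes a five-column corpus: two code samples per row (`code` and `style_context`), an integer style id for each, and a binary `label`. A minimal sketch of inspecting one row, assuming the dump comes from a Hugging Face dataset (the dataset path below is a hypothetical placeholder, and the meaning of `label` is not stated in this dump):

```python
# Hypothetical sketch: load the corpus described by the table above and
# inspect one row. "user/code-style-corpus" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("user/code-style-corpus", split="train")
row = ds[0]
print(row["code"][:200])               # first code sample (string, 82-53.2k chars)
print(row["code_codestyle"])           # style id for `code` (int64, 0-721)
print(row["style_context"][:200])      # second code sample (string, 91-41.9k chars)
print(row["style_context_codestyle"])  # style id for `style_context` (int64, 0-699)
print(row["label"])                    # binary target (int64, 0 or 1)
```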
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowercase_ ,lowercase_ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionDiffEditPipeline
SCREAMING_SNAKE_CASE__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
SCREAMING_SNAKE_CASE__ : Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ : str = frozenset([] )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_snake_case : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=snake_case , )
_snake_case : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
_snake_case : List[Any] = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_zero=snake_case , )
torch.manual_seed(0 )
_snake_case : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_snake_case : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_snake_case : List[Any] = CLIPTextModel(snake_case )
_snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case : Dict = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self : Tuple , snake_case : List[Any] , snake_case : str=0 ):
"""simple docstring"""
_snake_case : str = floats_tensor((1, 16, 16) , rng=random.Random(snake_case ) ).to(snake_case )
_snake_case : Optional[int] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith('mps' ):
_snake_case : int = torch.manual_seed(snake_case )
else:
_snake_case : Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case )
_snake_case : Optional[Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : Optional[Any] , snake_case : Optional[int] , snake_case : str=0 ):
"""simple docstring"""
_snake_case : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
_snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case : List[str] = Image.fromarray(np.uint8(snake_case ) ).convert('RGB' )
if str(snake_case ).startswith('mps' ):
_snake_case : Optional[int] = torch.manual_seed(snake_case )
else:
_snake_case : Union[str, Any] = torch.Generator(device=snake_case ).manual_seed(snake_case )
_snake_case : int = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : Tuple , snake_case : Dict , snake_case : List[str]=0 ):
"""simple docstring"""
_snake_case : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
_snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case : Tuple = Image.fromarray(np.uint8(snake_case ) ).convert('RGB' )
if str(snake_case ).startswith('mps' ):
_snake_case : Union[str, Any] = torch.manual_seed(snake_case )
else:
_snake_case : Any = torch.Generator(device=snake_case ).manual_seed(snake_case )
_snake_case : Optional[Any] = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : Optional[Any] = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(snake_case , snake_case , snake_case )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_snake_case : int = self.get_dummy_inputs(snake_case )
_snake_case : Optional[Any] = pipe(**snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case )
_snake_case : List[str] = self.pipeline_class.from_pretrained(snake_case )
pipe_loaded.to(snake_case )
pipe_loaded.set_progress_bar_config(disable=snake_case )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case , snake_case ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
_snake_case : List[str] = self.get_dummy_inputs(snake_case )
_snake_case : Optional[Any] = pipe_loaded(**snake_case )[0]
_snake_case : Tuple = np.abs(output - output_loaded ).max()
self.assertLess(snake_case , 1e-4 )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = 'cpu'
_snake_case : Tuple = self.get_dummy_components()
_snake_case : str = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_snake_case : str = self.get_dummy_mask_inputs(snake_case )
_snake_case : Union[str, Any] = pipe.generate_mask(**snake_case )
_snake_case : Union[str, Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_snake_case : str = np.array([0] * 9 )
_snake_case : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case : Tuple = 'cpu'
_snake_case : Tuple = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_snake_case : Union[str, Any] = self.get_dummy_inversion_inputs(snake_case )
_snake_case : str = pipe.invert(**snake_case ).images
_snake_case : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_snake_case : Optional[Any] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_snake_case : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1e-3 )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case : str = 'cpu'
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : Any = {'beta_start': 0.0_0085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
_snake_case : str = DPMSolverMultistepScheduler(**snake_case )
_snake_case : List[Any] = DPMSolverMultistepInverseScheduler(**snake_case )
_snake_case : int = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_snake_case : Tuple = self.get_dummy_inversion_inputs(snake_case )
_snake_case : List[Any] = pipe.invert(**snake_case ).images
_snake_case : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_snake_case : Dict = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_snake_case : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1e-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] ):
"""simple docstring"""
_snake_case : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
_snake_case : List[str] = raw_image.convert('RGB' ).resize((768, 768) )
_snake_case : Tuple = raw_image
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Optional[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=snake_case , torch_dtype=torch.float16 )
_snake_case : str = DDIMScheduler.from_config(pipe.scheduler.config )
_snake_case : Any = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case )
_snake_case : Optional[int] = 'a bowl of fruit'
_snake_case : Union[str, Any] = 'a bowl of pears'
_snake_case : str = pipe.generate_mask(
image=self.raw_image , source_prompt=snake_case , target_prompt=snake_case , generator=snake_case , )
_snake_case : Any = pipe.invert(
prompt=snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case ).latents
_snake_case : Optional[Any] = pipe(
prompt=snake_case , mask_image=snake_case , image_latents=snake_case , generator=snake_case , negative_prompt=snake_case , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
_snake_case : Optional[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case : Any = torch.manual_seed(0 )
_snake_case : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=snake_case , torch_dtype=torch.float16 )
_snake_case : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case )
_snake_case : int = 'a bowl of fruit'
_snake_case : List[Any] = 'a bowl of pears'
_snake_case : List[str] = pipe.generate_mask(
image=self.raw_image , source_prompt=snake_case , target_prompt=snake_case , generator=snake_case , )
_snake_case : Optional[int] = pipe.invert(
prompt=snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case , num_inference_steps=25 , ).latents
_snake_case : Optional[int] = pipe(
prompt=snake_case , mask_image=snake_case , image_latents=snake_case , generator=snake_case , negative_prompt=snake_case , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
_snake_case : int = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
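For reference, the three-stage DiffEdit flow these slow tests exercise, written out with readable names (reconstructed from the obfuscated test above; the model id, prompts, and image URL are taken directly from it, and the mapping of `prompt`/`negative_prompt` to target/source prompts follows the documented diffusers usage):

```python
# De-obfuscated sketch of the DiffEdit call sequence tested above:
# 1) generate_mask locates the region to edit, 2) invert produces partially
#    noised latents for the source image, 3) the pipeline inpaints the mask.
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

generator = torch.manual_seed(0)
mask_image = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit",
    target_prompt="a bowl of pears", generator=generator,
)
image_latents = pipe.invert(
    prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7, generator=generator
).latents
image = pipe(
    prompt="a bowl of pears", mask_image=mask_image, image_latents=image_latents,
    negative_prompt="a bowl of fruit", inpaint_strength=0.7, generator=generator,
).images[0]
```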
| 517 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase__ ( a__) -> Union[str, Any]:
"""simple docstring"""
def decorator(a__):
_snake_case : Tuple = getattr(a__ , 'handle_key' , [])
handle += [key]
setattr(a__ , 'handle_key' , a__)
return func
return decorator
def lowerCamelCase__ ( *a__) -> List[str]:
"""simple docstring"""
def decorator(a__):
_snake_case : List[str] = getattr(a__ , 'handle_key' , [])
handle += keys
setattr(a__ , 'handle_key' , a__)
return func
return decorator
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
def __new__( cls : int , snake_case : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Tuple ):
"""simple docstring"""
_snake_case : int = super().__new__(cls , snake_case , snake_case , snake_case )
if not hasattr(snake_case , 'key_handler' ):
setattr(snake_case , 'key_handler' , {} )
setattr(snake_case , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
_snake_case : Optional[Any] = getattr(snake_case , 'handle_key' , [] )
for key in handled_keys:
_snake_case : str = value
return new_cls
@staticmethod
def __UpperCAmelCase ( cls : List[Any] ):
"""simple docstring"""
_snake_case : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
_snake_case : str = ord(snake_case )
_snake_case : str = cls.key_handler.get(snake_case )
if handler:
_snake_case : Optional[int] = char
return handler(cls )
else:
return None
def lowerCamelCase__ ( cls) -> Optional[Any]:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy())
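The pattern above (two decorator factories plus a metaclass) tags methods with the keys they handle via a `handle_key` attribute; the metaclass collects those tags into a class-level `key_handler` dict that `handle_input` consults after reading a character. A minimal self-contained sketch with readable, illustrative names (the obfuscated source names both decorators `lowerCamelCase__`):

```python
# Minimal sketch of the decorator + metaclass key-dispatch pattern above.
# All names here are illustrative reconstructions, not the original API.
def mark(key):
    """Tag a method as the handler for a single key code."""
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class KeyHandlerMeta(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                new_cls.key_handler[key] = value  # key code -> handler function
        return new_cls

class Menu(metaclass=KeyHandlerMeta):
    @mark(ord("j"))
    def move_down(self):
        return "down"

assert Menu.key_handler[ord("j")] is Menu.move_down
```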
| 517 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
SCREAMING_SNAKE_CASE : Dict = get_logger(__name__)
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int=0 ):
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
_SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCamelCase_ : Optional[int] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCamelCase_ : str = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
UpperCamelCase_ : str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCamelCase_ : Dict = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
UpperCamelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCamelCase_ : int = os.path.join(_SCREAMING_SNAKE_CASE , f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f'''Saving model to {ckpt_dir}''' )
UpperCamelCase_ : Optional[Any] = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=_SCREAMING_SNAKE_CASE , storage_writer=dist_cp.FileSystemWriter(_SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(f'''Model saved to {ckpt_dir}''' )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_SCREAMING_SNAKE_CASE ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
UpperCamelCase_ : Optional[Any] = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
UpperCamelCase_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Loading model from {input_model_file}''' )
UpperCamelCase_ : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCamelCase_ : List[Any] = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
UpperCamelCase_ : Dict = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Loading model from {input_model_file}''' )
UpperCamelCase_ : Any = torch.load(_SCREAMING_SNAKE_CASE )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCamelCase_ : List[Any] = (
os.path.join(_SCREAMING_SNAKE_CASE , f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
UpperCamelCase_ : Any = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_SCREAMING_SNAKE_CASE , storage_reader=dist_cp.FileSystemReader(_SCREAMING_SNAKE_CASE ) , planner=DefaultLoadPlanner() , )
UpperCamelCase_ : Dict = state_dict["""model"""]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any]=0 ):
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
_SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCamelCase_ : Union[str, Any] = FSDP.optim_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
UpperCamelCase_ : Optional[int] = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
UpperCamelCase_ : str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
UpperCamelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(_SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCamelCase_ : Optional[Any] = None
# the check below should work, but currently it isn't working (mostly a PyTorch issue);
# in the meantime it is disabled, at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
UpperCamelCase_ : Tuple = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
UpperCamelCase_ : str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
UpperCamelCase_ : Dict = torch.load(_SCREAMING_SNAKE_CASE )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
UpperCamelCase_ : List[Any] = (
os.path.join(_SCREAMING_SNAKE_CASE , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
UpperCamelCase_ : List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(_SCREAMING_SNAKE_CASE ) , )
UpperCamelCase_ : List[Any] = optim_state["""optimizer"""]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
UpperCamelCase_ : Any = FSDP.optim_state_dict_to_load(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
optimizer.load_state_dict(_SCREAMING_SNAKE_CASE )
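The four helpers above mirror `accelerate`'s FSDP checkpoint utilities, dispatching on `fsdp_plugin.state_dict_type`: FULL_STATE_DICT writes one file from rank 0, LOCAL_STATE_DICT writes one file per rank, and SHARDED_STATE_DICT goes through `torch.distributed.checkpoint`. A sketch of the typical call pattern, assuming these are `save_fsdp_model`/`save_fsdp_optimizer` from `accelerate.utils` (the obfuscated defs above hide their names, so the correspondence and signatures are an assumption inferred from the bodies):

```python
# Assumed call pattern for the helpers above (save_fsdp_model /
# save_fsdp_optimizer from accelerate.utils; correspondence inferred, not
# confirmed by this dump).
from accelerate.utils import save_fsdp_model, save_fsdp_optimizer

def checkpoint_fsdp(accelerator, model, optimizer, output_dir):
    fsdp_plugin = accelerator.state.fsdp_plugin
    save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
    save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
```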
| 138 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase ( __a ):
def __init__(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def A_ (self , __UpperCamelCase = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def A_ (self ) -> Union[str, Any]:
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__(self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ) -> List[Any]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Tuple = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = len(__UpperCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCamelCase )}.''' )
# get prompt text embeddings
UpperCamelCase_ : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase_ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase_ : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : int = text_embeddings.shape
UpperCamelCase_ : Union[str, Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
UpperCamelCase_ : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ : List[str]
if negative_prompt is None:
UpperCamelCase_ : Union[str, Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !='''
f''' {type(__UpperCamelCase )}.''' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : List[Any] = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCamelCase_ : Dict = negative_prompt
UpperCamelCase_ : Optional[Any] = text_input_ids.shape[-1]
UpperCamelCase_ : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
UpperCamelCase_ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ : int = uncond_embeddings.shape[1]
UpperCamelCase_ : List[str] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
UpperCamelCase_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase_ : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase_ : int = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
UpperCamelCase_ : Tuple = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
UpperCamelCase_ : Optional[Any] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase_ : Union[str, Any] = latents_reference.to(self.device )
UpperCamelCase_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase_ : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase_ : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase_ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase_ : List[str] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase_ : str = 0 if dx < 0 else dx
UpperCamelCase_ : List[str] = 0 if dy < 0 else dy
UpperCamelCase_ : Dict = max(-dx , 0 )
UpperCamelCase_ : str = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCamelCase_ : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase_ : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase_ : str = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase_ : Any = {}
if accepts_eta:
UpperCamelCase_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ : str = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
UpperCamelCase_ : str = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase_,UpperCamelCase_ : Any = noise_pred.chunk(2 )
UpperCamelCase_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ : int = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ : Optional[int] = 1 / 0.18_215 * latents
UpperCamelCase_ : List[str] = self.vae.decode(__UpperCamelCase ).sample
UpperCamelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase_ : List[Any] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase_ : Tuple = None
if output_type == "pil":
UpperCamelCase_ : Any = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 138 | 1 |
'''simple docstring'''
from math import sqrt
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
snake_case__ : Dict = 0
for i in range(1 , int(sqrt(__magic_name__ ) + 1 ) ):
if n % i == 0 and i != sqrt(__magic_name__ ):
total += i + n // i
elif i == sqrt(__magic_name__ ):
total += i
return total - n
def UpperCamelCase__ ( __magic_name__ : int = 1_00_00 ) -> int:
'''simple docstring'''
snake_case__ : Dict = sum(
i
for i in range(1 , __magic_name__ )
if sum_of_divisors(sum_of_divisors(__magic_name__ ) ) == i and sum_of_divisors(__magic_name__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
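Here `sum_of_divisors` is the aliquot sum (sum of proper divisors, counting each divisor pair `i` and `n // i` once, and an exact square root only once), and `solution` adds up the amicable numbers below the limit: numbers where `sum_of_divisors(sum_of_divisors(i)) == i` but `sum_of_divisors(i) != i`. A quick worked check against the classic amicable pair (using the readable names the obfuscated defs are called by in the code above):

```python
# Worked check: 220 and 284 form the classic amicable pair, so both are
# counted by solution(10000).
assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110 = 284
assert sum_of_divisors(284) == 220  # 1+2+4+71+142 = 220
```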
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now, ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
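These tests exercise the standard `TrainerCallback` event protocol: each `on_*` hook fires at the matching point of the training loop, callbacks can be passed at init or managed with `add_callback`/`pop_callback`/`remove_callback`, and duplicate callbacks trigger a warning. A minimal custom callback of the same shape (standard transformers API):

```python
# Minimal custom callback using the same on_* event protocol the tests
# above record.
from transformers import TrainerCallback

class LogEpochCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")

# usage: Trainer(model=model, args=args, callbacks=[LogEpochCallback()])
```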
| 41 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int ):
if exponent == 1:
return base
if exponent % 2 == 0:
lowerCAmelCase : Dict = _modexpt(_snake_case , exponent // 2 , _snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(_snake_case , exponent - 1 , _snake_case )) % modulo_value
def _snake_case ( _snake_case : int = 1777 , _snake_case : int = 1855 , _snake_case : int = 8 ):
lowerCAmelCase : Union[str, Any] = base
for _ in range(1 , _snake_case ):
lowerCAmelCase : Optional[Any] = _modexpt(_snake_case , _snake_case , 10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 637 |
"""simple docstring"""
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Dict = val
lowerCAmelCase : str = None
lowerCAmelCase : Dict = None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ):
if self.val:
if val < self.val:
if self.left is None:
lowerCAmelCase : int = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowerCAmelCase : Any = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = val
def _snake_case ( _snake_case : Tuple , _snake_case : str ):
# Recursive traversal
if root:
inorder(root.left , _snake_case )
res.append(root.val )
inorder(root.right , _snake_case )
def _snake_case ( _snake_case : Optional[Any] ):
# Build BST
if len(_snake_case ) == 0:
return arr
lowerCAmelCase : Optional[Any] = Node(arr[0] )
for i in range(1 , len(_snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowerCAmelCase : Optional[int] = []
inorder(_snake_case , _snake_case )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
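Tree sort inserts each element into a binary search tree and reads the values back with an in-order traversal: O(n log n) on average, degrading to O(n²) when the input is already sorted and the tree degenerates into a chain. Note two quirks of the implementation above: `insert` silently drops duplicates (the final `else` just reassigns `self.val`), and the `if self.val:` guard means a root value of `0` blocks further inserts. A quick usage check:

```python
# Usage check for tree_sort above; duplicates are dropped by insert().
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]
assert tree_sort([5, 3, 5, 1]) == [1, 3, 5]
```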
| 637 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCAmelCase_ : List[str] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase_ ( cls ):
_SCREAMING_SNAKE_CASE : Optional[Any] = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def UpperCAmelCase_ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_SCREAMING_SNAKE_CASE : List[Any] = FlaxBertModel(__snake_case )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
_SCREAMING_SNAKE_CASE : Optional[int] = flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case , repo_id="""test-model-flax""" , push_to_hub=__snake_case , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1e-3 , msg=f"""{key} not identical""" )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel(__snake_case )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Tuple = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_SCREAMING_SNAKE_CASE : List[Any] = flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__snake_case , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : str = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_SCREAMING_SNAKE_CASE : int = flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1e-3 , msg=f"""{key} not identical""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = flatten_dict(modela.params )
_SCREAMING_SNAKE_CASE : Union[str, Any] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
return models_are_equal
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_SCREAMING_SNAKE_CASE : Tuple = FlaxBertModel(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__snake_case , __snake_case ) )
with self.assertRaises(__snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertTrue(check_models_equal(__snake_case , __snake_case ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel(__snake_case )
_SCREAMING_SNAKE_CASE : str = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__snake_case , __snake_case ) , max_shard_size="""10KB""" )
with self.assertRaises(__snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = FlaxBertModel.from_pretrained(__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertTrue(check_models_equal(__snake_case , __snake_case ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = """bert"""
_SCREAMING_SNAKE_CASE : Tuple = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(__snake_case ):
_SCREAMING_SNAKE_CASE : Dict = FlaxBertModel.from_pretrained(__snake_case )
_SCREAMING_SNAKE_CASE : int = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertIsNotNone(__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = """bert"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(__snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case )
_SCREAMING_SNAKE_CASE : Dict = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertIsNotNone(__snake_case )
| 533 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
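This is the lazy-import boilerplate used across transformers: at import time only `_import_structure` is built, and `_LazyModule` defers importing `modeling_wavlm` (and hence torch) until a WavLM name is actually accessed. Plain Python can get the same effect with PEP 562's module-level `__getattr__`; a minimal sketch, meant to live in a package's `__init__.py`:

```python
# Minimal PEP 562 sketch of the lazy-import pattern _LazyModule implements:
# the submodule is imported only when one of its names is first accessed.
import importlib

_IMPORT_STRUCTURE = {"modeling_wavlm": ["WavLMModel", "WavLMForCTC"]}
_NAME_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}

def __getattr__(name):  # invoked only for names not already defined here
    module = _NAME_TO_MODULE.get(name)
    if module is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module("." + module, __package__), name)
```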
| 533 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : Tuple = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Rename the timm-style backbone keys to the HF conv-encoder layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused attention input projection into separate q/k/v entries."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # (destination key names follow the analogous DETR conversion script)
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
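# Shape note for the split above: with hidden size 256 (hard-coded here),
# in_proj_weight is (768, 256); rows [0:256] feed q_proj, [256:512] k_proj,
# and [512:768] v_proj, matching PyTorch's fused nn.MultiheadAttention layout.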
def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    # NOTE: the key remapping below follows the analogous DETR conversion script; verify
    # it against the upstream checkpoint layout before relying on it.
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 710 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until `item` is found, then re-insert it with
            # the new priority and push everything else back onto the heap
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
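# Minimal usage sketch for PriorityQueue (illustrative only):
#
#     pq = PriorityQueue()
#     pq.put((0, 0), 5)
#     pq.put((1, 0), 3)
#     assert pq.minkey() == 3 and pq.top_show() == (1, 0)
#     pq.put((1, 0), 7)               # re-inserting an item updates its priority
#     assert pq.get() == (5, (0, 0))  # (0, 0) is now the cheapest entry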
def consistent_heuristic(P: TPos, goal: TPos):
    # Euclidean distance is a consistent heuristic on a grid
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos):
    # inadmissible variant: the consistent heuristic scaled down by t
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos):
    # Manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
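# Example: with W1 == 1 and heuristic 0 (Euclidean), the anchor key of the
# start node (0, 0) toward goal (n - 1, n - 1) is g[(0, 0)] + sqrt(2) * (n - 1),
# i.e. 0 + 19 * sqrt(2) for the n == 20 grid used below.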
def do_something(back_pointer, goal, start):
    # render the grid with the found path, print it, and exit
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 604 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : Optional[Any], A_ : Tuple, A_ : Dict ):
'''simple docstring'''
if len(__lowercase ) != len(__lowercase ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
_lowerCamelCase : int = [p / w for p, w in zip(__lowercase, __lowercase )]
# Creating a copy of the list and sorting profit/weight in ascending order
_lowerCamelCase : Optional[int] = sorted(__lowercase )
# declaring useful variables
_lowerCamelCase : Optional[int] = len(__lowercase )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Optional[int] = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
_lowerCamelCase : List[str] = sorted_profit_by_weight[length - i - 1]
_lowerCamelCase : Optional[Any] = profit_by_weight.index(__lowercase )
_lowerCamelCase : str = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
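# Worked example (hand-checked): profit = [1, 2, 3], weight = [3, 4, 5],
# max_weight = 15. The profit/weight ratios are [1/3, 1/2, 3/5], so the greedy
# loop picks item 2, then item 1, then item 0; all three fit (total weight
# 12 <= 15), so calc_profit([1, 2, 3], [3, 4, 5], 15) returns 6.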
if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 83 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoint values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). CANINE uses Unicode codepoints directly as token ids,
    so no vocabulary file is required.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save
        return ()
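# Usage sketch (illustrative only): CANINE tokenizes at the character level,
# so ids are plain Unicode codepoints plus the special codepoints above, e.g.
#
#     tokenizer = CanineTokenizer()
#     tokenizer._tokenize("hi")               # -> ["h", "i"]
#     tokenizer._convert_token_to_id("a")     # -> 97, i.e. ord("a")
#     tokenizer._convert_id_to_token(0xE000)  # -> "[CLS]"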
| 357 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowercase = ['''gpt2''']
lowercase = '''gpt2'''
if is_tf_available():
class A_ ( tf.Module ):
def __init__( self : int , __lowerCamelCase : int ) -> str:
super().__init__()
__magic_name__ = tokenizer
__magic_name__ = AutoConfig.from_pretrained(UpperCAmelCase_ )
__magic_name__ = TFGPTaLMHeadModel.from_config(UpperCAmelCase_ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def _snake_case ( self : Any , __lowerCamelCase : int ) -> List[str]:
__magic_name__ = self.tokenizer(UpperCAmelCase_ )
__magic_name__ = tokenized['input_ids'].to_tensor()
__magic_name__ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__magic_name__ = self.model(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )['logits']
return outputs
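# Note on the serving signature above: the tf.function is traced for a 1-D
# batch of strings (shape (None,), dtype tf.string), which is what lets the
# SavedModel exported in the tests below accept raw text tensors directly.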
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 706 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 468 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path):
    # Check that each of the custom extension files exists under the given tree
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
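# Usage sketch (illustrative only):
#
#     test_custom_files_are_present(Path("build/lib/transformers"))
#
# returns False as soon as any path in FILES_TO_FIND is missing from the tree.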
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"

    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 84 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # Overridden because InstructPix2Pix encodes the input image with .mode()
    # instead of sampling from the latent distribution.
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_steps(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 272 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
__lowerCamelCase : Union[str, Any] = SamImageProcessor()
__lowerCamelCase : int = SamProcessor(_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self : Tuple , **_lowerCamelCase : Any ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).image_processor
def _snake_case ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase : Any = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__lowerCamelCase : Optional[int] = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Tuple = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : Optional[int] = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
__lowerCamelCase : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : int = self.get_image_processor()
__lowerCamelCase : Tuple = SamProcessor(image_processor=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCamelCase : List[str] = image_processor(_lowerCamelCase , return_tensors="""np""" )
__lowerCamelCase : List[Any] = processor(images=_lowerCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Dict = self.get_image_processor()
__lowerCamelCase : Union[str, Any] = SamProcessor(image_processor=_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = [torch.ones((1, 3, 5, 5) )]
__lowerCamelCase : Any = [[1_7_6_4, 2_6_4_6]]
__lowerCamelCase : Any = [[6_8_3, 1_0_2_4]]
__lowerCamelCase : Tuple = processor.post_process_masks(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__lowerCamelCase : Union[str, Any] = processor.post_process_masks(
_lowerCamelCase , torch.tensor(_lowerCamelCase ) , torch.tensor(_lowerCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
__lowerCamelCase : Any = [np.ones((1, 3, 5, 5) )]
__lowerCamelCase : int = processor.post_process_masks(_lowerCamelCase , np.array(_lowerCamelCase ) , np.array(_lowerCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__lowerCamelCase : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(_lowerCamelCase ):
__lowerCamelCase : Union[str, Any] = processor.post_process_masks(_lowerCamelCase , np.array(_lowerCamelCase ) , np.array(_lowerCamelCase ) )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
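
# Note (added for clarity, not in the original file): `post_process_masks` rescales the
# low-resolution mask logits back to each image's original (height, width). That is why
# the (1, 3, 5, 5) dummy masks above come back as (1, 3, 1764, 2646), and why inputs
# whose sizes cannot be broadcast together raise an error in both the PyTorch and
# TensorFlow paths.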
| 710 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        # Attribute names below are restored from the Mask2FormerConfig schema; the
        # obfuscated source only preserved the assigned values, so treat the mapping
        # of values to config attributes as a best-effort reconstruction.
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _snake_case ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _snake_case ( self : str ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _snake_case ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 458 | 0 |
def solution(min_total: int = 10**12) -> int:
    """
    Project Euler Problem 100 (https://projecteuler.net/problem=100): returns the
    number of blue discs in the first arrangement whose total number of discs
    exceeds ``min_total``.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
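    # Illustrative sanity checks (not in the original script): the first two
    # arrangements with P(two blue) = 1/2 are 15 blue / 21 total and 85 blue / 120 total.
    assert solution(20) == 15
    assert solution(21) == 85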
| 10 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
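
# Note (added for clarity): the image paths above are relative, so these tests assume
# they are run from the repository root, e.g.:
#   python -m pytest digital_image_processing/test_digital_image_processing.py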
| 207 | 0 |
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
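
# Illustration of the parsing contract above (hypothetical warning text, not from a
# real CI run): in a `warnings.txt` whose indented block reads
#     src/example.py:12: DeprecationWarning: `foo` is deprecated
# and with targets == ["DeprecationWarning"], the warning is kept because the
# substring ": DeprecationWarning: " occurs in the joined buffer; any unindented
# line marks the boundary between consecutive warnings.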
if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 154 |
def solution(n: int = 600_851_475_143) -> int:
    """
    Returns the largest prime factor of ``n`` (Project Euler Problem 3,
    https://projecteuler.net/problem=3).
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 154 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
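
# Example invocation (a sketch: `--data_dir`, `--model_name_or_path`, `--output_dir`
# and `--do_train` are assumed to be defined by the companion `lightning_base`
# module, which is not shown here):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results/mrpc --do_train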
| 116 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of two `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # use the JSON entry point here (the obfuscated source called the YAML
            # parser, which also accepts JSON, but this matches the test's intent)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
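
# The pattern exercised above, in miniature (standalone sketch using the classes
# defined in this file):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"],
#       look_for_args_file=False,
#   )
#   assert example.foo == 1 and example.flag is True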
| 116 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
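
# Typical downstream usage (a sketch; the Hub checkpoint name is an assumption and
# downloading it requires network access):
#   from diffusers import KandinskyPriorPipeline
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")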
| 718 |
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
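
# Example invocation (paths are placeholders):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/xglm/model.pt ./xglm-hf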
| 202 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself.

    Field names are restored from the values and help strings preserved in the
    obfuscated source.
    """

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
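
# For intuition: with eval_result = 0.9 and 1000 inference rows, the
# sort-and-select step above keeps the 900 highest-confidence pseudo-labels.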
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training a pre-trained model on a downstream task.

    Args:
      model_name_or_path: Path to pretrained model or model identifier from
        huggingface.co/models.
      train_file: A csv or a json file containing the training data.
      infer_file: A csv or a json file containing the data to predict on.
      output_dir: The output directory where the model predictions and
        checkpoints will be written.
      **kwargs: Additional arguments overriding the defaults declared in the
        ST*Arguments dataclasses above.
    """
    # Initialize the accelerator. We let the accelerator handle device placement.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"] if args.eval_file is not None else None,
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
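
# A minimal usage sketch (file paths, task name, and labels below are
# illustrative assumptions, not values shipped with this script):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="outputs/self-training",
#       eval_file="data/eval.csv",
#       evaluation_strategy="epoch",
#       task_name="scitail",
#       label_list=["entails", "neutral"],
#       max_selftrain_iterations=10,
#   )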
| 79 |
import argparse
import hashlib  # hashlib is only used inside the test function below
import struct
class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
    main()

    import doctest

    doctest.testmod()
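
# Quick sanity check against a well-known SHA-1 test vector:
#   >>> SHA1Hash(b"abc").final_hash()
#   'a9993e364706816aba3e25717850c26c9cd0d89d'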
| 79 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
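
# Individual tests can be run selectively, e.g.:
#   python -m unittest <module>.Test.test_euclidean_length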
if __name__ == "__main__":
unittest.main()
| 133 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    sm_env = SageMakerTestEnvironment(framework=request.cls.framework)
    request.cls.env = sm_env
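
# Test classes consume this fixture via @pytest.mark.usefixtures("sm_env")
# together with a class-level `framework` attribute (e.g. framework = "pytorch").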
| 133 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
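
# Example launch (task and paths are illustrative; see utils_multiple_choice
# for the available processors):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag --output_dir ./out --do_train --do_eval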
| 74 |
from __future__ import annotations
def solution(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
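
# Illustrative call: 0 marks an open cell, 1 a blocked cell.
#   solution([[0, 1], [0, 0]])  # prints the path matrix and returns True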
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(_SCREAMING_SNAKE_CASE , "Please use tf.data to implement this functionality." )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_SCREAMING_SNAKE_CASE ) as bytestream:
__lowercase = _readaa(_SCREAMING_SNAKE_CASE )
if magic != 2_0_5_1:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
__lowercase = _readaa(_SCREAMING_SNAKE_CASE )
__lowercase = _readaa(_SCREAMING_SNAKE_CASE )
__lowercase = _readaa(_SCREAMING_SNAKE_CASE )
__lowercase = bytestream.read(rows * cols * num_images )
__lowercase = numpy.frombuffer(_SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
__lowercase = data.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
return data
@deprecated(_SCREAMING_SNAKE_CASE , "Please use tf.one_hot on tensors." )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = labels_dense.shape[0]
__lowercase = numpy.arange(_SCREAMING_SNAKE_CASE ) * num_classes
__lowercase = numpy.zeros((num_labels, num_classes) )
__lowercase = 1
return labels_one_hot
@deprecated(_SCREAMING_SNAKE_CASE , "Please use tf.data to implement this functionality." )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1_0 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_SCREAMING_SNAKE_CASE ) as bytestream:
__lowercase = _readaa(_SCREAMING_SNAKE_CASE )
if magic != 2_0_4_9:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
__lowercase = _readaa(_SCREAMING_SNAKE_CASE )
__lowercase = bytestream.read(_SCREAMING_SNAKE_CASE )
__lowercase = numpy.frombuffer(_SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return labels
class _DataSet:
    """Container class for a dataset (deprecated TensorFlow API)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_SCREAMING_SNAKE_CASE , "Please write your own downloading logic." )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not gfile.Exists(_SCREAMING_SNAKE_CASE ):
gfile.MakeDirs(_SCREAMING_SNAKE_CASE )
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not gfile.Exists(_SCREAMING_SNAKE_CASE ):
urllib.request.urlretrieve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # noqa: S310
with gfile.GFile(_SCREAMING_SNAKE_CASE ) as f:
__lowercase = f.size()
print("Successfully downloaded" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "bytes." )
return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
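
# Typical usage of this (deprecated) API, sketched:
#   data = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = data.train.next_batch(100)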
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
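
# Example (script name is illustrative; the model name follows timm's swinv2 scheme):
#   python convert_swinv2_timm_to_pytorch.py --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny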
| 655 | 1 |
def check_cycle(graph: dict) -> bool:
    # Keep track of visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
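
# Example: the back edge 2 -> 0 closes a cycle.
#   check_cycle({0: [1], 1: [2], 2: [0]})  # True
#   check_cycle({0: [1], 1: [2], 2: []})   # False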
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
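
# With this lazy-module pattern, importing the package stays cheap: the heavy,
# torch-dependent modules are only loaded on first attribute access.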
| 121 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching the input string
    against a pattern, where '.' matches any single character and '*' matches
    zero or more of the preceding character.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("dabc", "*abc")
    False
    >>> match_pattern("aaa", "aa")
    False
    >>> match_pattern("aaa", "a.a")
    True
    >>> match_pattern("aaab", "aa*")
    False
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
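
# Worked example for match_pattern("aab", "c*a*b"): "c*" matches zero 'c's
# (dp[0][2] = 1), "a*" then absorbs both 'a's, and the final 'b' matches 'b',
# so dp[-1][-1] = 1 and the function returns True.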
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 80 |
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
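
# Illustrative count (cells are 8-connected):
#   grid = [[1, 0, 0], [0, 0, 0], [0, 0, 1]]
#   Matrix(3, 3, grid).count_islands()  # 2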
| 80 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 67 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
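
# e.g. MarianConfig() reproduces the opus-mt-en-de defaults above, while
# MarianConfig(vocab_size=32000, d_model=512) would describe a smaller,
# hypothetical variant.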
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            # OnnxSeq2SeqConfigWithPast is assumed here, matching the upstream seq2seq ONNX configs
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
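# A minimal export sketch for the ONNX config above.  The checkpoint id is an
# assumption for illustration, not something this file prescribes.
if __name__ == "__main__":
    from pathlib import Path

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers.onnx import export

    checkpoint = "Helsinki-NLP/opus-mt-en-de"  # assumed seq2seq checkpoint
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
    onnx_config = A__OnnxConfig(model.config, task="seq2seq-lm")
    # `generate_dummy_inputs` defined above supplies the tracing inputs for the export.
    onnx_inputs, onnx_outputs = export(
        tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx")
    )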
| 405 | 0 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
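# The false-positive test above works because ZipExtractor trusts only the
# leading magic number.  A standalone sketch of that check (this helper is
# illustrative and not part of `datasets`):
def _starts_with_zip_magic(path):
    with open(path, "rb") as f:
        magic = f.read(4)
    # local file header, empty-archive end-of-central-directory, spanned marker
    return magic in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")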
| 701 |
"""simple docstring"""
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
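# Worked example (non-interactive): for the postfix expression "5 6 9 * +",
# solve("5 6 9 * +".split(" ")) pushes 5, 6 and 9, reduces 6 * 9 to 54,
# then 5 + 54 to 59, and returns 59.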
| 295 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """simple docstring"""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """simple docstring"""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
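# A minimal usage sketch via the public `datasets` API (the sentences below
# are made up for illustration):
#
#   from datasets import Dataset, Features, Translation
#
#   features = Features({"translation": Translation(languages=["en", "fr"])})
#   ds = Dataset.from_dict(
#       {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
#   )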
| 478 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ (PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> Any:
torch.manual_seed(0 )
        lowerCAmelCase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=_a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCAmelCase_ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
lowerCAmelCase_ = CLIPTextModel(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowerCAmelCase_ = CLIPTextModelWithProjection(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowerCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __a ( self , _a , _a=0 ) -> Dict:
lowerCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
lowerCAmelCase_ = image / 2 + 0.5
if str(_a ).startswith("mps" ):
lowerCAmelCase_ = torch.manual_seed(_a )
else:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.7_5,
}
return inputs
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
        lowerCAmelCase_ = StableDiffusionXLImg2ImgPipeline(**_a )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = sd_pipe(**_a ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __a ( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> str:
pass
    def __a ( self ) -> int:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Optional[int]:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowerCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = pipe(**_a ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
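# A minimal end-to-end sketch of the pipeline class exercised above.  The
# checkpoint id, image path and prompt are assumptions for illustration; the
# tests themselves run on tiny randomly initialised components instead.
#
#   import torch
#   from diffusers import StableDiffusionXLImg2ImgPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
#   ).to("cuda")
#   init_image = load_image("input.jpg")  # placeholder path
#   image = pipe("A fantasy landscape", image=init_image, strength=0.75).images[0]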
| 122 | 0 |
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
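# An equivalent sketch using the standard library, for comparison (this helper
# is illustrative and not part of the module above):
def modes_with_counter(input_list: list) -> list:
    from collections import Counter

    if not input_list:
        return []
    counts = Counter(input_list)
    top = max(counts.values())
    return sorted(value for value, count in counts.items() if count == top)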
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
    '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
        '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoXJapaneseForCausalLM''',
        '''GPTNeoXJapaneseLayer''',
        '''GPTNeoXJapaneseModel''',
        '''GPTNeoXJapanesePreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
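# With the lazy module installed in sys.modules, importing this package stays
# cheap: torch and the submodules are only loaded on first attribute access.
# A hypothetical illustration:
#
#   from transformers.models import gpt_neox_japanese as gnj  # no torch import yet
#   model_cls = gnj.GPTNeoXJapaneseForCausalLM                # triggers the real import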
| 160 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 505 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, '''question_encoder_tokenizer''')
        generator_path = os.path.join(save_directory, '''generator_tokenizer''')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('''config''', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder='''question_encoder_tokenizer''')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder='''generator_tokenizer''')
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''',
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs)
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs)
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
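# A minimal usage sketch (the checkpoint id is an assumption for illustration;
# the question-encoder tokenizer is active by default):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")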
| 60 | 0 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError('surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError('surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
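# Quick sanity check for Heron's formula above: a 3-4-5 right triangle has
# area (3 * 4) / 2 == 6.0, and area_triangle_three_sides(3, 4, 5) returns the
# same value, since sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) == 6.0.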
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 700 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
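# Example: the first six primes are 2, 3, 5, 7, 11 and 13, so solution(6)
# returns 13; solution() with the default argument returns the 10001st prime.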
if __name__ == "__main__":
print(F'''{solution() = }''')
| 513 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, """total_steps"""):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
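# Sketch of how this wrapper is normally obtained: `accelerator.prepare` wraps
# a plain torch scheduler in the class above (names below are illustrative):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader, scheduler = accelerator.prepare(
#       model, optimizer, dataloader, scheduler
#   )
#   for batch in dataloader:
#       ...
#       optimizer.step()
#       scheduler.step()  # no-op when the optimizer step was skipped (e.g. grad overflow)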
| 17 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase: List[Any] = logging.get_logger(__name__)
__UpperCamelCase: Dict = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
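# A minimal configuration sketch (assuming the usual PretrainedConfig API):
#
#   config = LukeConfig(num_hidden_layers=2)   # override any default
#   config.save_pretrained("./luke-config")    # writes config.json
#   config = LukeConfig.from_pretrained("./luke-config")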
| 266 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : Dict = torch.device("cpu")
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('''.pwconv''', '''.point_wise_conv''')
        if ".dwconv" in k:
            k_new = k_new.replace('''.dwconv''', '''.depth_wise_conv''')
        if ".Proj." in k:
            k_new = k_new.replace('''.Proj.''', '''.proj.''')
        if "patch_embed" in k_new:
            k_new = k_new.replace('''patch_embed''', '''swiftformer.patch_embed.patch_embedding''')
        if "network" in k_new:
            ls = k_new.split('''.''')
            if ls[2].isdigit():
                k_new = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:])
            else:
                k_new = k_new.replace('''network''', '''swiftformer.encoder.network''')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
'''simple docstring'''
_UpperCAmelCase = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_UpperCAmelCase = 1000
_UpperCAmelCase = '''huggingface/label-files'''
_UpperCAmelCase = '''imagenet-1k-id2label.json'''
_UpperCAmelCase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
_UpperCAmelCase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_UpperCAmelCase = [3, 3, 6, 4]
_UpperCAmelCase = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
_UpperCAmelCase = [3, 3, 9, 6]
_UpperCAmelCase = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
_UpperCAmelCase = [4, 3, 10, 5]
_UpperCAmelCase = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
_UpperCAmelCase = [4, 4, 12, 6]
_UpperCAmelCase = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
_UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' , check_hash=_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )
_UpperCAmelCase = checkpoint
_UpperCAmelCase = create_rename_keys(_SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
_UpperCAmelCase = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# prepare test inputs
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
_UpperCAmelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# compare outputs from both models
_UpperCAmelCase = get_expected_output(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
__A : Dict = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
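# Typical invocation (the script file name and checkpoint path are placeholders;
# the flags mirror the argparse definitions above):
#
#   python convert_swiftformer_checkpoint.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/original_checkpoint.pth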
| 710 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
random.seed(_SCREAMING_SNAKE_CASE )
np.random.seed(_SCREAMING_SNAKE_CASE )
torch.manual_seed(_SCREAMING_SNAKE_CASE )
torch.cuda.manual_seed_all(_SCREAMING_SNAKE_CASE )
# ^^ safe to call this function even if cuda is not available
class EMAModel:
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] , __UpperCamelCase : float = 0.9_9_9_9 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 0 , __UpperCamelCase : bool = False , __UpperCamelCase : Union[float, int] = 1.0 , __UpperCamelCase : Union[float, int] = 2 / 3 , __UpperCamelCase : Optional[Any] = None , __UpperCamelCase : Dict[str, Any] = None , **__UpperCamelCase : Optional[Any] , )->Tuple:
if isinstance(__UpperCamelCase , torch.nn.Module ):
_UpperCAmelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase , )
_UpperCAmelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase = True
if kwargs.get('''max_value''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''max_value''']
if kwargs.get('''min_value''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''min_value''']
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
self.to(device=kwargs['''device'''] )
_UpperCAmelCase = None
_UpperCAmelCase = decay
_UpperCAmelCase = min_decay
_UpperCAmelCase = update_after_step
_UpperCAmelCase = use_ema_warmup
_UpperCAmelCase = inv_gamma
_UpperCAmelCase = power
_UpperCAmelCase = 0
_UpperCAmelCase = None # set in `step()`
_UpperCAmelCase = model_cls
_UpperCAmelCase = model_config
@classmethod
def lowercase__ ( cls : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Any )->"EMAModel":
_UpperCAmelCase , _UpperCAmelCase = model_cls.load_config(__UpperCamelCase , return_unused_kwargs=__UpperCamelCase )
_UpperCAmelCase = model_cls.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = cls(model.parameters() , model_cls=__UpperCamelCase , model_config=model.config )
ema_model.load_state_dict(__UpperCamelCase )
return ema_model
def lowercase__ ( self : str , __UpperCamelCase : Any )->Optional[Any]:
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
_UpperCAmelCase = self.model_cls.from_config(self.model_config )
_UpperCAmelCase = self.state_dict()
state_dict.pop('''shadow_params''' , __UpperCamelCase )
model.register_to_config(**__UpperCamelCase )
self.copy_to(model.parameters() )
model.save_pretrained(__UpperCamelCase )
def lowercase__ ( self : int , __UpperCamelCase : int )->float:
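        # Decay schedule implemented below:
        #   with warmup:  1 - (1 + step / inv_gamma) ** -power   (ramps from 0 toward 1)
        #   otherwise:    (1 + step) / (10 + step)
        # The result is then clamped into [min_decay, decay].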
_UpperCAmelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase = (1 + step) / (1_0 + step)
_UpperCAmelCase = min(__UpperCamelCase , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase = max(__UpperCamelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase__ ( self : List[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] )->Optional[Any]:
if isinstance(__UpperCamelCase , torch.nn.Module ):
_UpperCAmelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase , )
_UpperCAmelCase = parameters.parameters()
_UpperCAmelCase = list(__UpperCamelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase = self.get_decay(self.optimization_step )
_UpperCAmelCase = decay
_UpperCAmelCase = 1 - decay
_UpperCAmelCase = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , __UpperCamelCase ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_UpperCAmelCase = deepspeed.zero.GatheredParameters(__UpperCamelCase , modifier_rank=__UpperCamelCase )
with context_manager():
if param.requires_grad:
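                    # in-place EMA update: s <- s - (1 - decay) * (s - param),
                    # i.e. s <- decay * s + (1 - decay) * param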
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__UpperCamelCase )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] )->None:
_UpperCAmelCase = list(__UpperCamelCase )
for s_param, param in zip(self.shadow_params , __UpperCamelCase ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : List[str] , __UpperCamelCase : str=None , __UpperCamelCase : Union[str, Any]=None )->None:
_UpperCAmelCase = [
p.to(device=__UpperCamelCase , dtype=__UpperCamelCase ) if p.is_floating_point() else p.to(device=__UpperCamelCase )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] )->dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Any , __UpperCamelCase : Iterable[torch.nn.Parameter] )->None:
_UpperCAmelCase = [param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] )->None:
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , __UpperCamelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase = None
def lowercase__ ( self : Any , __UpperCamelCase : dict )->None:
_UpperCAmelCase = copy.deepcopy(__UpperCamelCase )
_UpperCAmelCase = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
_UpperCAmelCase = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , __UpperCamelCase ):
raise ValueError('''Invalid min_decay''' )
_UpperCAmelCase = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , __UpperCamelCase ):
raise ValueError('''Invalid optimization_step''' )
_UpperCAmelCase = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , __UpperCamelCase ):
raise ValueError('''Invalid update_after_step''' )
_UpperCAmelCase = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __UpperCamelCase ):
raise ValueError('''Invalid use_ema_warmup''' )
_UpperCAmelCase = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
_UpperCAmelCase = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
_UpperCAmelCase = state_dict.get('''shadow_params''' , __UpperCamelCase )
if shadow_params is not None:
_UpperCAmelCase = shadow_params
if not isinstance(self.shadow_params , __UpperCamelCase ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(__UpperCamelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
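# Minimal usage sketch (illustrative only): the names `EMAModel`, `step`, `store`,
# `copy_to` and `restore` are the upstream diffusers API, which the obfuscated
# listing above renames; a toy torch model stands in for a real one.
if __name__ == "__main__":
    _toy = torch.nn.Linear(4 , 2 )
    _ema = EMAModel(_toy.parameters() , decay=0.9999 )
    for _ in range(3 ):
        # ... an optimizer step would update _toy's parameters here ...
        _ema.step(_toy.parameters() )
    _ema.store(_toy.parameters() )    # stash the raw training weights
    _ema.copy_to(_toy.parameters() )  # evaluate with the averaged weights
    _ema.restore(_toy.parameters() )  # put the training weights back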
| 95 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_lowerCamelCase ={
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_lowerCamelCase =AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( lowerCamelCase, lowerCamelCase=False ):
    model , model_cfg = create_model(
"""HTSAT-tiny""", """roberta""", lowerCAmelCase_, precision="""fp32""", device="""cuda:0""" if torch.cuda.is_available() else """cpu""", enable_fusion=lowerCAmelCase_, fusion_type="""aff_2d""" if enable_fusion else None, )
return model, model_cfg
def _a ( lowerCamelCase ):
lowerCamelCase : str = {}
lowerCamelCase : str = R""".*sequential.(\d+).*"""
lowerCamelCase : Optional[int] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase : List[str] = key.replace(lowerCAmelCase_, lowerCAmelCase_ )
if re.match(lowerCAmelCase_, lowerCAmelCase_ ):
# replace sequential layers with list
lowerCamelCase : Dict = re.match(lowerCAmelCase_, lowerCAmelCase_ ).group(1 )
            lowerCamelCase : Tuple = key.replace(F'''sequential.{sequential_layer}.''', F'''layers.{int(sequential_layer )//3}.linear.''' )
elif re.match(lowerCAmelCase_, lowerCAmelCase_ ):
lowerCamelCase : str = int(re.match(lowerCAmelCase_, lowerCAmelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowerCamelCase : int = 1 if projecton_layer == 0 else 2
lowerCamelCase : Optional[Any] = key.replace(F'''_projection.{projecton_layer}.''', F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowerCamelCase : Dict = value
lowerCamelCase : List[str] = mixed_qkv.size(0 ) // 3
lowerCamelCase : Optional[int] = mixed_qkv[:qkv_dim]
lowerCamelCase : Dict = mixed_qkv[qkv_dim : qkv_dim * 2]
lowerCamelCase : Optional[Any] = mixed_qkv[qkv_dim * 2 :]
lowerCamelCase : Optional[Any] = query_layer
lowerCamelCase : Any = key_layer
lowerCamelCase : Union[str, Any] = value_layer
else:
lowerCamelCase : List[str] = value
return model_state_dict
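# Example of the renaming above on a hypothetical checkpoint key:
#   "text_branch.encoder.layer.0.mlp.fc1.weight"
#   -> "text_model.encoder.layer.0.intermediate.dense.weight"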
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False ):
    clap_model , model_cfg = init_clap(lowerCAmelCase_, enable_fusion=lowerCAmelCase_ )
clap_model.eval()
lowerCamelCase : Union[str, Any] = clap_model.state_dict()
lowerCamelCase : str = rename_state_dict(lowerCAmelCase_ )
lowerCamelCase : int = ClapConfig()
lowerCamelCase : Optional[Any] = enable_fusion
lowerCamelCase : int = ClapModel(lowerCAmelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase_, strict=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
transformers_config.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_lowerCamelCase =parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 681 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 61 | 0 |
import argparse
import datetime
def a_ ( _A ) -> str:
"""simple docstring"""
snake_case__ = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
snake_case__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(_A ) < 11:
raise ValueError('Must be 10 characters long' )
# Get month
snake_case__ = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
snake_case__ = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
snake_case__ = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
snake_case__ = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
snake_case__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
snake_case__ = datetime.date(int(_A ) , int(_A ) , int(_A ) )
# Start math
if m <= 2:
snake_case__ = y - 1
snake_case__ = m + 12
# maths var
snake_case__ = int(str(_A )[:2] )
snake_case__ = int(str(_A )[2:] )
snake_case__ = int(2.6 * m - 5.39 )
snake_case__ = int(c / 4 )
snake_case__ = int(k / 4 )
snake_case__ = int(d + k )
snake_case__ = int(t + u + v + x )
snake_case__ = int(z - (2 * c) )
snake_case__ = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
snake_case__ = f'''Your date {date_input}, is a {days[str(_A )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : str = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
__UpperCamelCase : List[Any] = parser.parse_args()
zeller(args.date_input)
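    # Example: running this script as `python <script>.py 01-01-2010` prints
    #   "Your date 01-01-2010, is a Friday!"  (2010-01-01 was indeed a Friday)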
| 372 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __SCREAMING_SNAKE_CASE:
def __init__( self: int , UpperCamelCase: Any ) -> List[Any]:
snake_case__ = data
snake_case__ = None
class __SCREAMING_SNAKE_CASE:
def __init__( self: Dict ) -> Optional[Any]:
snake_case__ = None
snake_case__ = None
def __iter__( self: Optional[int] ) -> Iterator[Any]:
snake_case__ = self.head
while self.head:
yield node.data
snake_case__ = node.next
if node == self.head:
break
def __len__( self: int ) -> int:
return sum(1 for _ in self )
def __repr__( self: int ) -> Optional[int]:
return "->".join(str(UpperCamelCase ) for item in iter(self ) )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> None:
self.insert_nth(len(self ) , UpperCamelCase )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Any ) -> None:
self.insert_nth(0 , UpperCamelCase )
def lowerCAmelCase_ ( self: int , UpperCamelCase: int , UpperCamelCase: Any ) -> None:
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
snake_case__ = Node(UpperCamelCase )
if self.head is None:
snake_case__ = new_node # first node points itself
snake_case__ = snake_case__ = new_node
elif index == 0: # insert at head
snake_case__ = self.head
snake_case__ = snake_case__ = new_node
else:
snake_case__ = self.head
for _ in range(index - 1 ):
snake_case__ = temp.next
snake_case__ = temp.next
snake_case__ = new_node
if index == len(self ) - 1: # insert at tail
snake_case__ = new_node
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
return self.delete_nth(0 )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
return self.delete_nth(len(self ) - 1 )
def lowerCAmelCase_ ( self: str , UpperCamelCase: int = 0 ) -> Any:
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
snake_case__ = self.head
if self.head == self.tail: # just one node
snake_case__ = snake_case__ = None
elif index == 0: # delete head node
snake_case__ = self.tail.next.next
snake_case__ = self.head.next
else:
snake_case__ = self.head
for _ in range(index - 1 ):
snake_case__ = temp.next
snake_case__ = temp.next
snake_case__ = temp.next.next
if index == len(self ) - 1: # delete at tail
snake_case__ = temp
return delete_node.data
def lowerCAmelCase_ ( self: Union[str, Any] ) -> bool:
return len(self ) == 0
def a_ ( ) -> None:
"""simple docstring"""
snake_case__ = CircularLinkedList()
assert len(_A ) == 0
assert circular_linked_list.is_empty() is True
assert str(_A ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(_A ) == i
circular_linked_list.insert_nth(_A , i + 1 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(_A ) == "->".join(str(_A ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 372 | 1 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( lowercase_):
"""simple docstring"""
snake_case__ : Tuple =['''image_processor''', '''tokenizer''']
snake_case__ : str ='''AutoImageProcessor'''
snake_case__ : Optional[int] ='''AutoTokenizer'''
def __init__( self: Optional[int] , __a: int=None , __a: List[Any]=None , **__a: Dict )-> Tuple:
lowerCamelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCamelCase_ , )
lowerCamelCase : Optional[int] = kwargs.pop("""feature_extractor""" )
lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase : List[str] = self.image_processor
lowerCamelCase : List[Any] = False
def __call__( self: Dict , *__a: Union[str, Any] , **__a: List[Any] )-> Union[str, Any]:
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_ , **lowerCamelCase_ )
lowerCamelCase : List[Any] = kwargs.pop("""images""" , lowerCamelCase_ )
lowerCamelCase : List[str] = kwargs.pop("""text""" , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase : Dict = args[0]
lowerCamelCase : List[Any] = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
lowerCamelCase : int = self.image_processor(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
if text is not None:
lowerCamelCase : Dict = self.tokenizer(lowerCamelCase_ , **lowerCamelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase : Optional[Any] = encodings["""input_ids"""]
return inputs
def a__ ( self: Any , *__a: Union[str, Any] , **__a: Optional[int] )-> Optional[Any]:
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def a__ ( self: str , *__a: Tuple , **__a: List[str] )-> List[str]:
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@contextmanager
def a__ ( self: List[str] )-> Tuple:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
lowerCamelCase : Optional[int] = True
lowerCamelCase : str = self.tokenizer
yield
lowerCamelCase : List[Any] = self.image_processor
lowerCamelCase : Optional[Any] = False
def a__ ( self: Tuple , __a: List[Any] , __a: Tuple=False , __a: List[Any]=None )-> List[Any]:
if added_vocab is None:
lowerCamelCase : Optional[int] = self.tokenizer.get_added_vocab()
lowerCamelCase : Any = {}
while tokens:
lowerCamelCase : Optional[Any] = re.search(r"""<s_(.*?)>""" , lowerCamelCase_ , re.IGNORECASE )
if start_token is None:
break
lowerCamelCase : Optional[int] = start_token.group(1 )
lowerCamelCase : List[str] = re.search(rf'</s_{key}>' , lowerCamelCase_ , re.IGNORECASE )
lowerCamelCase : Optional[int] = start_token.group()
if end_token is None:
lowerCamelCase : Tuple = tokens.replace(lowerCamelCase_ , """""" )
else:
lowerCamelCase : Any = end_token.group()
lowerCamelCase : Union[str, Any] = re.escape(lowerCamelCase_ )
lowerCamelCase : Any = re.escape(lowerCamelCase_ )
lowerCamelCase : int = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' , lowerCamelCase_ , re.IGNORECASE )
if content is not None:
lowerCamelCase : str = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase : Any = self.tokenajson(lowerCamelCase_ , is_inner_value=lowerCamelCase_ , added_vocab=lowerCamelCase_ )
if value:
if len(lowerCamelCase_ ) == 1:
lowerCamelCase : List[Any] = value[0]
lowerCamelCase : List[str] = value
else: # leaf nodes
lowerCamelCase : str = []
for leaf in content.split(r"""<sep/>""" ):
lowerCamelCase : Union[str, Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase : int = leaf[1:-2] # for categorical special tokens
output[key].append(lowerCamelCase_ )
if len(output[key] ) == 1:
lowerCamelCase : Optional[Any] = output[key][0]
lowerCamelCase : Union[str, Any] = tokens[tokens.find(lowerCamelCase_ ) + len(lowerCamelCase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCamelCase_ , added_vocab=lowerCamelCase_ )
if len(lowerCamelCase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
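        # Example of the parsing above (hypothetical Donut-style sequence; the
        # upstream name for this method is `token2json`):
        #   "<s_menu><s_name>Latte</s_name></s_menu>" -> {"menu": {"name": "Latte"}}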
@property
def a__ ( self: Optional[int] )-> List[Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase_ , )
return self.image_processor_class
@property
def a__ ( self: List[str] )-> Union[str, Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase_ , )
return self.image_processor
| 222 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = """▁"""
__UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = BigBirdTokenizer
SCREAMING_SNAKE_CASE__ = BigBirdTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = """<s>"""
SCREAMING_SNAKE_CASE : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(lowerCamelCase_ ) , 10_04 )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = """I was born in 92000, and this is falsé."""
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = BigBirdTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = """Hello World!"""
SCREAMING_SNAKE_CASE : Tuple = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
SCREAMING_SNAKE_CASE : Dict = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
SCREAMING_SNAKE_CASE : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE : List[str] = """ """.join(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = BigBirdConfig(attention_type="""original_full""" )
SCREAMING_SNAKE_CASE : Tuple = BigBirdModel(lowerCamelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        # fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 379 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ :Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ :Dict = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = CamembertTokenizer
_A : Optional[int] = CamembertTokenizerFast
_A : str = True
_A : str = True
def A_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : List[str] = CamembertTokenizer(__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = '''<pad>'''
__UpperCAmelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__lowercase ) , 1_004 )
def A_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = CamembertTokenizer(__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Dict = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = '''I was born in 92000, and this is falsé.'''
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Dict = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[str] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowercase )
__UpperCAmelCase : List[str] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def A_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : List[str] = self.get_rust_tokenizer()
__UpperCAmelCase : Optional[int] = '''I was born in 92000, and this is falsé.'''
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : int = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : int = self.get_rust_tokenizer()
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : Optional[int] = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def A_ ( self : Union[str, Any] ):
        '''simple docstring'''
        # fmt: off
__UpperCAmelCase : str = {'''input_ids''': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__UpperCAmelCase : Tuple = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=__lowercase , )
| 713 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Dict:
"""simple docstring"""
    __UpperCAmelCase : Optional[Any] = checkpoints.load_t5x_checkpoint(UpperCAmelCase_ )
__UpperCAmelCase : Any = flatten_dict(UpperCAmelCase_ )
return flax_params
def lowerCamelCase_ ( UpperCAmelCase_ ) ->List[str]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Any = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
__UpperCAmelCase : Optional[int] = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCAmelCase : Optional[Any] = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCAmelCase : List[str] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCAmelCase : Dict = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCAmelCase : Dict = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCAmelCase : Any = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , UpperCAmelCase_ )
__UpperCAmelCase : Dict = flax_dict[key]
__UpperCAmelCase : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCAmelCase : List[str] = torch.from_numpy(converted_dict[key].T )
else:
__UpperCAmelCase : Optional[int] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
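# Example of the renaming above on a hypothetical T5X key
# ("target", "encoder", "layers_0", "attention", "query", "kernel"):
#   -> "encoder.encoder.layer.0.attention.query.weight"  (weights are transposed below)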
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=False ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : List[Any] = get_flax_param(UpperCAmelCase_ )
if not use_large:
        __UpperCAmelCase : List[str] = Pix2StructVisionConfig()
        __UpperCAmelCase : Optional[Any] = Pix2StructTextConfig()
    else:
        __UpperCAmelCase : Optional[Any] = Pix2StructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
        __UpperCAmelCase : Optional[Any] = Pix2StructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
    __UpperCAmelCase : Dict = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=UpperCAmelCase_ )
    __UpperCAmelCase : Optional[Any] = Pix2StructForConditionalGeneration(UpperCAmelCase_ )
__UpperCAmelCase : Any = rename_and_convert_flax_params(UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    __UpperCAmelCase : Union[str, Any] = Pix2StructImageProcessor()
    __UpperCAmelCase : Optional[int] = Pix2StructProcessor(image_processor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
if use_large:
__UpperCAmelCase : str = 40_96
__UpperCAmelCase : str = True
# mkdir if needed
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
print('''Model saved in {}'''.format(UpperCAmelCase_ ) )
if __name__ == "__main__":
lowercase__ :List[str] = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
lowercase__ :Optional[Any] = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 374 | 0 |
'''simple docstring'''
UpperCAmelCase = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 119 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_SCREAMING_SNAKE_CASE : Dict = random.Random()
if is_torch_available():
import torch
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ):
"""simple docstring"""
if rng is None:
__magic_name__ : Optional[Any] = global_rng
__magic_name__ : Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: str , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: int=7 , __UpperCamelCase: Union[str, Any]=400 , __UpperCamelCase: Dict=2000 , __UpperCamelCase: int=1 , __UpperCamelCase: List[str]=0.0 , __UpperCamelCase: List[Any]=1_6000 , __UpperCamelCase: str=True , __UpperCamelCase: Union[str, Any]=True , ) -> Dict:
__magic_name__ : List[Any] = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = min_seq_length
__magic_name__ : Optional[int] = max_seq_length
__magic_name__ : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__magic_name__ : List[str] = feature_size
__magic_name__ : Union[str, Any] = padding_value
__magic_name__ : int = sampling_rate
__magic_name__ : str = return_attention_mask
__magic_name__ : List[str] = do_normalize
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase__ ( self: str , __UpperCamelCase: Tuple=False , __UpperCamelCase: List[Any]=False ) -> Union[str, Any]:
def _flatten(__UpperCamelCase: Union[str, Any] ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
__magic_name__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__magic_name__ : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__magic_name__ : Tuple = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _snake_case ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = ASTFeatureExtractor
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
__magic_name__ : str = ASTFeatureExtractionTester(self )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__magic_name__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__magic_name__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__magic_name__ : str = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__magic_name__ : List[Any] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__magic_name__ : Any = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
# Test batched
__magic_name__ : List[str] = feat_extract(__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" ).input_values
__magic_name__ : List[str] = feat_extract(__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__magic_name__ : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__magic_name__ : Tuple = np.asarray(__UpperCamelCase )
__magic_name__ : List[Any] = feat_extract(__UpperCamelCase , return_tensors="np" ).input_values
__magic_name__ : Dict = feat_extract(__UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
import torch
__magic_name__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __magic_name__ : Tuple = np.random.rand(100 ).astype(np.float32 )
__magic_name__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__magic_name__ : Optional[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            __magic_name__ : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: Dict ) -> Tuple:
from datasets import load_dataset
__magic_name__ : Optional[int] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__magic_name__ : Optional[int] = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def lowerCAmelCase__ ( self: Any ) -> int:
# fmt: off
__magic_name__ : str = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
__magic_name__ : List[Any] = self._load_datasamples(1 )
__magic_name__ : Any = ASTFeatureExtractor()
__magic_name__ : Tuple = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , __UpperCamelCase , atol=1E-4 ) )
| 436 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''camembert'''
def __init__( self : str , UpperCAmelCase_ : Optional[Any]=3_0522 , UpperCAmelCase_ : Optional[int]=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : List[str]=1E-12 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : int="absolute" , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : str = classifier_dropout
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@property
def _A ( self : Optional[int] ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
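    # Usage sketch (assuming this class is transformers' CamembertOnnxConfig):
    #   onnx_config = CamembertOnnxConfig(CamembertConfig(), task="default")
    #   list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]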
| 488 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a, b, c):
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""" )
    discriminant = b * b - 4 * a * c
    root_1 = (-b + sqrt(discriminant )) / (2 * a)
    root_2 = (-b - sqrt(discriminant )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1 , solution_2 = quadratic_roots(a=5, b=6, c=1 )
    print(F'''The solutions are: {solution_1} and {solution_2}''' )
if __name__ == "__main__":
    main()
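    # Worked example: 5x^2 + 6x + 1 = 0 has discriminant 36 - 20 = 16,
    # so quadratic_roots(a=5, b=6, c=1) returns (-0.2, -1.0).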
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
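
# A minimal usage sketch (a hypothetical test, not part of the original fixtures):
# the directory fixture can be handed straight to `datasets.load_dataset`. Note that
# the dummy script downloads remote JSONL files, so this needs network access.
def test_dummy_dataset_script_loads(dataset_loading_script_dir):
    import datasets

    dset = datasets.load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in dset.column_names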
| 681 | 1 |
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
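# A small sanity-check sketch (not part of the original script): the TEST config
# should instantiate a UNet2DModel directly, since the conversion below relies on
# `UNet2DModel(**unet_config)` accepting exactly these keys.
def _check_test_unet_config():
    model = UNet2DModel(**TEST_UNET_CONFIG)
    print(f"test UNet parameter count: {sum(p.numel() for p in model.parameters())}")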
def strabool(v):
    """Parse a CLI string into a boolean."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map one consistency-model ResNet block onto the diffusers naming scheme.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection into separate q/k/v weights for diffusers.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
UpperCamelCase = parser.parse_args()
UpperCamelCase = strabool(args.class_cond)
UpperCamelCase = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
UpperCamelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
UpperCamelCase = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
UpperCamelCase = None
UpperCamelCase = con_pt_to_diffuser(args.unet_path, unet_config)
UpperCamelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
UpperCamelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
UpperCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
UpperCamelCase = CMStochasticIterativeScheduler(**scheduler_config)
UpperCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
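    # Optional sanity check (a sketch added here, not in the original script):
    # reload the saved pipeline and draw one single-step sample. The sample
    # filename is an assumption for illustration.
    reloaded = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    sample = reloaded(num_inference_steps=1).images[0]
    sample.save(os.path.join(args.dump_path, "sample.png"))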
| 125 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
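
# A minimal usage sketch (the data directory is hypothetical, and this helper is
# not part of the original module): read CoNLL-formatted examples with the NER
# task and peek at the first sentence.
def _demo_read_ner_examples():
    task = NER()
    examples = task.read_examples_from_file("path/to/conll_data", Split.train)
    print(examples[0].words[:5], examples[0].labels[:5])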
| 125 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from CLI arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
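
# Example invocation (a sketch; the csv path is hypothetical):
#
#   transformers-cli train --task text_classification \
#       --train_data ./data/train.csv --column_label 0 --column_text 1 --column_id 2 \
#       --model bert-base-uncased --output ./trained_pipeline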
| 521 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
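# Example launch (a sketch; the data directory is hypothetical):
#
#   accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch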
| 466 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 175 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
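
def _demo_conversation() -> Conversation:
    # A tiny usage sketch (not part of the original module): it shows the intended
    # add_user_input -> mark_processed -> append_response cycle.
    conversation = Conversation("Hi, can you recommend a movie?")
    conversation.mark_processed()
    conversation.append_response("Have you seen The Matrix?")
    conversation.add_user_input("Is it any good?")
    return conversation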
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 175 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
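
def _demo() -> None:
    # A small usage sketch (not in the original module): a fixed-capacity FIFO
    # backed by a circular doubly linked list.
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # -> a
    print(queue.dequeue())  # -> a
    print(queue.dequeue())  # -> b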
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 46 |
"""Tests for the TensorFlow Flaubert model."""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(a__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 331 | 0 |
"""Tokenization classes for GPTNeoXJapanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
        else:
            vocab_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
            )
            emoji_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
            )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(""",""".join(token ) + """\n""" )
                index += 1
        with open(emoji_file , """w""" , encoding="""utf-8""" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
'''simple docstring'''
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
        self.content_repatter2 = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
        self.content_repatter3 = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        self.content_repatter4 = re.compile(
R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter5 = re.compile(
R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter6 = re.compile(
R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
snake_case__ : str = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
snake_case__ : List[Any] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
snake_case__ : Optional[int] = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
    def __len__( self ) -> int:
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub("""<URL>""" , content )
        content = self.content_repatter2.sub("""<EMAIL>""" , content )
        content = self.content_repatter3.sub("""<TEL>""" , content )
        content = self.content_repatter4.sub("""<DATE>""" , content )
        content = self.content_repatter5.sub("""<DATE>""" , content )
        content = self.content_repatter6.sub("""<PRICE>""" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(""" """ , """<SP>""" )
        text = text.replace("""　""" , """<SP>""" )  # full-width space
        text = text.replace("""\r\n""" , """<BR>""" )
        text = text.replace("""\n""" , """<BR>""" )
        text = text.replace("""\r""" , """<BR>""" )
        text = text.replace("""\t""" , """<TAB>""" )
        text = text.replace("""—""" , """ー""" )
        text = text.replace("""−""" , """ー""" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc2_a1 and c <= 0xc2_bf)
                    or (c >= 0xc7_80 and c <= 0xc7_83)
                    or (c >= 0xca_b9 and c <= 0xcb_bf)
                    or (c >= 0xcc_80 and c <= 0xcd_a2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe2_80_80 and c <= 0xe2_b0_7f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the candidate with the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("""<KIGOU>""" )
                elif checkuae(wd ):
                    result.append("""<U2000U2BFF>""" )
                else:
                    for i in wd.encode("""utf-8""" ):
                        result.append("""<|byte%d|>""" % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word] )
            elif word == "<SP>":
                words.append(""" """ )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("""\t""" )
            elif word == "<BLOCK>":
                words.append("""▀""" )
            elif word == "<KIGOU>":
                words.append("""ǀ""" )
            elif word == "<U2000U2BFF>":
                words.append("""‖""" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
        text = """""".join(words )
        return text
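# A minimal sketch of the matching rule implemented in `tokenize` above: every
# substring starting at the current position is looked up in the vocabulary, and
# among the hits the entry with the smallest token id wins. The toy vocabulary and
# helper below are illustrative only, not part of the tokenizer (which additionally
# falls back to <|byteN|> tokens when nothing matches).
def greedy_match_demo():
    vocab = {"abc": 5, "ab": 9, "a": 2, "b": 7, "c": 1}
    text = "abc"
    pos, result = 0, []
    while pos < len(text):
        candidates = [
            (vocab[text[pos:e]], text[pos:e], e)
            for e in range(len(text), pos, -1)
            if text[pos:e] in vocab
        ]
        _, wd, e = sorted(candidates, key=lambda x: x[0])[0]  # smallest id wins
        result.append(wd)
        pos = e
    return result  # ['a', 'b', 'c']: 'a' (id 2) beats 'ab' (id 9) and 'abc' (id 5)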
| 172 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter ):
    """Counts, for each perimeter <= max_perimeter, the right triangles with integer sides."""
    triplets : typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution( max_perimeter = 1000 ):
    """Returns the perimeter <= max_perimeter admitting the most right triangles."""
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
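# Sanity checks (editor's sketch): perimeter 120 admits exactly three right
# triangles -- (30, 40, 50), (20, 48, 52) and (24, 45, 51) -- and for the default
# limit of 1000 the most common perimeter is 840, the Project Euler 39 answer.
def _sanity_check():
    assert pythagorean_triple(120)[120] == 3
    assert solution() == 840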
| 172 | 1 |
from math import factorial
def solution( num : int = 100 ) -> int:
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
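# Worked example (editor's sketch): 10! = 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; the default num = 100 gives 648 (Project Euler 20).
def _check_examples() -> None:
    assert solution(10) == 27
    assert solution(100) == 648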
| 17 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , ) -> List[str]:
'''simple docstring'''
lowercase__ = size if size is not None else {"""height""": 18, """width""": 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                [-0.6042559146881104, -0.022950008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = ImageGPTImageProcessingTester(self )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_normalize""" ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
lowercase__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase__ )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(lowerCamelCase__ , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase__ )
lowercase__ = self.image_processing_class.from_json_file(lowerCamelCase__ ).to_dict()
lowercase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase__ )
lowercase__ = self.image_processing_class.from_pretrained(lowerCamelCase__ ).to_dict()
lowercase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _A ( ):
lowercase__ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase__ = Image.open(dataset[4]["""file"""] )
lowercase__ = Image.open(dataset[5]["""file"""] )
lowercase__ = [imagea, imagea]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase__ = prepare_images()
# test non-batched
lowercase__ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
lowercase__ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase__ )
# test batched
lowercase__ = image_processing(lowerCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
lowercase__ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase__ )
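# The "clusters" above act as a tiny color palette: ImageGPT-style preprocessing
# maps each normalized pixel to the index of its nearest cluster, and those indices
# are the input ids the tests check. A self-contained sketch of that assignment
# (illustrative helper, not the library implementation):
import numpy as np

def nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) values in [-1, 1]; clusters: (k, 3) palette entries
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)  # (n, k)
    return distances.argmin(axis=1)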
| 325 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__lowerCAmelCase : Any = logging.get_logger(__name__)
class A ( UpperCAmelCase__ ):
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> None:
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 710 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price( symbol : str = "AAPL" ):
    """Returns the current price of the given ticker symbol scraped from Yahoo Finance."""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
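# The CSS class above is tied to Yahoo's current markup and silently breaks when the
# page changes: `soup.find(...)` then returns None and `.find("span")` raises
# AttributeError. A defensive wrapper (editor's sketch):
def stock_price_safe(symbol: str = "AAPL") -> str:
    try:
        return stock_price(symbol)
    except AttributeError:
        return "N/A"  # selector no longer matches the page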
| 654 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Any = "xlnet"
a__ : Dict = ["mems"]
a__ : List[str] = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = n_layer
_A = n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_A = d_model // n_head
_A = ff_activation
_A = d_inner
_A = untie_r
_A = attn_type
_A = initializer_range
_A = layer_norm_eps
_A = dropout
_A = mem_len
_A = reuse_len
_A = bi_data
_A = clamp_len
_A = same_length
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_last_dropout
_A = start_n_top
_A = end_n_top
_A = bos_token_id
_A = pad_token_id
_A = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , __lowerCAmelCase , )
_A = kwargs['''use_cache''']
_A = use_mems_eval
_A = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
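# The divisibility check in the constructor above is standard multi-head attention
# bookkeeping: each head receives d_model // n_head dimensions, so d_model must
# divide evenly. Standalone sketch (hypothetical helper, not part of the config):
def head_dim(d_model: int, n_head: int) -> int:
    if d_model % n_head != 0:
        raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
    return d_model // n_head

# head_dim(1024, 16) == 64, while head_dim(1024, 10) raises ValueError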
| 2 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class __lowerCamelCase ( __lowercase ):
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , lowerCamelCase , )
        super().__init__(*lowerCamelCase , **lowerCamelCase )
| 156 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """table-transformer"""
SCREAMING_SNAKE_CASE__ = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__(self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=1_00 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_="resnet50" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , **SCREAMING_SNAKE_CASE_ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = backbone_config.get("""model_type""" )
UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# set timm attributes to None
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None, None, None
UpperCamelCase__ = use_timm_backbone
UpperCamelCase__ = backbone_config
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_queries
UpperCamelCase__ = d_model
UpperCamelCase__ = encoder_ffn_dim
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = encoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = activation_function
UpperCamelCase__ = init_std
UpperCamelCase__ = init_xavier_std
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = auxiliary_loss
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = backbone
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = dilation
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = mask_loss_coefficient
UpperCamelCase__ = dice_loss_coefficient
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase_ (self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ (self ):
return self.d_model
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = version.parse("""1.11""" )
@property
def UpperCAmelCase_ (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCAmelCase_ (self ):
return 1E-5
@property
def UpperCAmelCase_ (self ):
return 12
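# The ONNX input spec above marks the batch, channel and spatial dimensions of
# `pixel_values` as dynamic. The same {index: name} convention is what
# torch.onnx.export expects for its `dynamic_axes` argument (editor's sketch):
dynamic_axes = {
    "pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"},
    "pixel_mask": {0: "batch"},
}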
| 720 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase_ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    '''Closes, reopens or stale-labels huggingface/diffusers issues based on recent activity.'''
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/diffusers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
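# The branches above encode three time windows: close once the bot's own notice has
# sat unanswered for more than 7 days, reopen when a human replies to a stale issue,
# and post the notice after more than 23 quiet days on issues at least 30 days old.
# A pure-function sketch of the close/notify decision (names are illustrative; the
# label exemptions and the reopen path are omitted for brevity):
from datetime import datetime

def stale_action(updated_at: datetime, created_at: datetime, last_by_bot: bool, now: datetime) -> str:
    quiet_days = (now - updated_at).days
    age_days = (now - created_at).days
    if last_by_bot and quiet_days > 7 and age_days >= 30:
        return "close"
    if quiet_days > 23 and age_days >= 30:
        return "mark-stale"
    return "keep"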
| 86 | 0 |
import gc
import threading
import time
import psutil
import torch
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = psutil.Process()
SCREAMING_SNAKE_CASE : int = False
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = -1
while True:
SCREAMING_SNAKE_CASE : Tuple = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # busy-wait on purpose: sleeping here would let short-lived memory peaks slip past unmeasured
if not self.peak_monitoring:
break
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = threading.Thread(target=self.peak_monitor )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
self.thread.start()
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = False
self.thread.join()
return self.cpu_memory_peak
a_ = PeakCPUMemory()
def lowerCamelCase__ ( ):
# Time
SCREAMING_SNAKE_CASE : Optional[int] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE : List[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count()):
SCREAMING_SNAKE_CASE : Dict = torch.cuda.memory_allocated(_a)
torch.cuda.reset_peak_memory_stats()
return measures
def lowerCamelCase__ ( _a):
# Time
SCREAMING_SNAKE_CASE : Optional[int] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE : Tuple = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
SCREAMING_SNAKE_CASE : Optional[Any] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count()):
SCREAMING_SNAKE_CASE : Tuple = (torch.cuda.memory_allocated(_a) - start_measures[str(_a)]) / 2**20
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.cuda.max_memory_allocated(_a) - start_measures[str(_a)]) / 2**20
return measures
def lowerCamelCase__ ( _a , _a):
print(f"{description}:")
print(f"- Time: {measures['time']:.2f}s")
for i in range(torch.cuda.device_count()):
print(f"- GPU {i} allocated: {measures[str(_a)]:.2f}MiB")
SCREAMING_SNAKE_CASE : Tuple = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB")
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 25 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__magic_name__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , )
def _a ( self , _A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def _a ( self ):
'''simple docstring'''
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self , _A , _A=1_6_0_0_0 , _A = 5_1_2 , _A = 5_1_2 , _A = 5_0 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ):
'''simple docstring'''
UpperCamelCase : str = self.speech_processor.feature_extractor(
_A , return_tensors="""pt""" , sampling_rate=_A ).input_features.to(self.device )
        UpperCamelCase : List[Any] = self.speech_model.generate(_A , max_length=480_000 )
UpperCamelCase : Optional[int] = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A )[
0
]
if isinstance(_A , _A ):
UpperCamelCase : Tuple = 1
elif isinstance(_A , _A ):
UpperCamelCase : List[Any] = len(_A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
# get prompt text embeddings
UpperCamelCase : Dict = self.tokenizer(
_A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : int = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[int] = text_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : str = [""""""] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
f""" {type(_A )}.""" )
elif isinstance(_A , _A ):
UpperCamelCase : Tuple = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : Any = negative_prompt
UpperCamelCase : Optional[int] = text_input_ids.shape[-1]
UpperCamelCase : List[str] = self.tokenizer(
_A , padding="""max_length""" , max_length=_A , truncation=_A , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : List[Any] = uncond_embeddings.shape[1]
UpperCamelCase : Dict = uncond_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Tuple = torch.randn(_A , generator=_A , device="""cpu""" , dtype=_A ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : Tuple = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : str = {}
if accepts_eta:
UpperCamelCase : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : str = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCamelCase : Optional[Any] = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
        UpperCamelCase : Optional[Any] = 1 / 0.18215 * latents
UpperCamelCase : Union[str, Any] = self.vae.decode(_A ).sample
UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
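# The guidance step above combines the two UNet passes as
#     noise = noise_uncond + scale * (noise_text - noise_uncond),
# extrapolating past the unconditional prediction whenever scale > 1. A
# self-contained check of that arithmetic (editor's sketch):
import torch

def cfg_combine(uncond: torch.Tensor, text: torch.Tensor, scale: float) -> torch.Tensor:
    return uncond + scale * (text - uncond)

# with scale == 1.0 the text-conditioned prediction is returned unchanged:
# torch.allclose(cfg_combine(u, t, 1.0), t) holds for any tensors u, t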
| 102 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """distilbert"""
_lowerCamelCase = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self ,lowercase=30522 ,lowercase=512 ,lowercase=False ,lowercase=6 ,lowercase=12 ,lowercase=768 ,lowercase=4 * 768 ,lowercase=0.1 ,lowercase=0.1 ,lowercase="gelu" ,lowercase=0.02 ,lowercase=0.1 ,lowercase=0.2 ,lowercase=0 ,**lowercase ,):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = sinusoidal_pos_embds
UpperCAmelCase_ : Optional[Any] = n_layers
UpperCAmelCase_ : str = n_heads
UpperCAmelCase_ : List[str] = dim
UpperCAmelCase_ : Dict = hidden_dim
UpperCAmelCase_ : Optional[Any] = dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Optional[int] = activation
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Tuple = qa_dropout
UpperCAmelCase_ : Optional[Any] = seq_classif_dropout
super().__init__(**lowercase ,pad_token_id=lowercase)
class snake_case_ (lowercase__ ):
"""simple docstring"""
@property
def A_ ( self):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase_ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
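# The attribute_map above lets BERT-style names alias DistilBERT's own fields, so
# `config.hidden_size` transparently reads `config.dim`. A minimal sketch of that
# aliasing mechanism (much simplified from PretrainedConfig; illustrative only):
class AliasedConfig:
    attribute_map = {"hidden_size": "dim"}

    def __init__(self, dim: int = 768) -> None:
        self.dim = dim

    def __getattr__(self, name: str):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

# AliasedConfig(dim=512).hidden_size == 512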
| 455 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
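# The _LazyModule wiring above keeps `import transformers` cheap: the import
# structure only records which names live in which submodule, and the heavy
# torch/TF imports run on first attribute access. A toy illustration of the same
# idea (hypothetical class, much simplified):
import importlib

class LazyNamespace:
    def __init__(self, mapping):
        self._mapping = mapping  # attribute name -> module path

    def __getattr__(self, name):
        if name not in self._mapping:
            raise AttributeError(name)
        module = importlib.import_module(self._mapping[name])
        setattr(self, name, module)  # cache so later lookups bypass __getattr__
        return module

# lazy = LazyNamespace({"json": "json"}); lazy.json.dumps({}) -> '{}'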
| 455 | 1 |
def different_signs( num1: int , num2: int ) -> bool:
    """Returns True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
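# Why the XOR trick works (editor's note): for integers the sign lives in the top
# bit, and `^` sets that bit exactly when the operands' sign bits differ; Python's
# negative ints behave as two's complement with infinite sign extension.
def _demo() -> None:
    assert different_signs(3, -7) is True
    assert different_signs(-3, -7) is False
    assert different_signs(3, 0) is False  # zero counts as non-negative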
| 598 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : Dict = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 447 | 0 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__, """embed_dim""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase__, """num_heads""" ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=64, lowerCamelCase__=3, lowerCamelCase__=[16, 48, 96], lowerCamelCase__=[1, 3, 6], lowerCamelCase__=[1, 2, 10], lowerCamelCase__=[7, 3, 3], lowerCamelCase__=[4, 2, 2], lowerCamelCase__=[2, 1, 1], lowerCamelCase__=[2, 2, 2], lowerCamelCase__=[False, False, True], lowerCamelCase__=[0.0, 0.0, 0.0], lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=2, ):
A : Optional[Any] = parent
A : Tuple = batch_size
A : Union[str, Any] = image_size
A : Any = patch_sizes
A : Tuple = patch_stride
A : List[Any] = patch_padding
A : int = is_training
A : List[Any] = use_labels
A : List[str] = num_labels
A : str = num_channels
A : List[Any] = embed_dim
A : Any = num_heads
A : Optional[int] = stride_kv
A : str = depth
A : Dict = cls_token
A : str = attention_drop_rate
A : str = initializer_range
A : Tuple = layer_norm_eps
def _lowerCAmelCase ( self ):
A : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
A : Tuple = ids_tensor([self.batch_size], self.num_labels )
A : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ):
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = TFCvtModel(config=lowerCamelCase__ )
A : Union[str, Any] = model(lowerCamelCase__, training=lowerCamelCase__ )
A : List[str] = (self.image_size, self.image_size)
A , A : int = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
A : List[str] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
A : int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = self.num_labels
A : List[str] = TFCvtForImageClassification(lowerCamelCase__ )
A : str = model(lowerCamelCase__, labels=lowerCamelCase__, training=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ):
A : Any = self.prepare_config_and_inputs()
A , A , A : Optional[int] = config_and_inputs
A : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__lowerCamelCase : Optional[Any] = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : Dict = False
def _lowerCAmelCase ( self ):
A : Optional[int] = TFCvtModelTester(self )
A : Tuple = TFCvtConfigTester(self, config_class=lowerCamelCase__, has_text_modality=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
def _lowerCAmelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
@slow
def _lowerCAmelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def _lowerCAmelCase ( self ):
A : Optional[int] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(lowerCamelCase__ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def _lowerCAmelCase ( self ):
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(lowerCamelCase__ )
A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Dict = [*signature.parameters.keys()]
A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
def check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Tuple = model_class(lowerCamelCase__ )
A : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ ) )
A : int = outputs.hidden_states
A : List[str] = len(self.model_tester.depth )
self.assertEqual(len(lowerCamelCase__ ), lowerCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[str] = True
check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Dict = True
check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Dict = TFCvtModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
A : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowerCAmelCase ( self ):
A : List[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A : Dict = self.default_image_processor
A : Any = prepare_img()
A : Dict = image_processor(images=lowerCamelCase__, return_tensors="""tf""" )
# forward pass
A : int = model(**lowerCamelCase__ )
# verify the logits
A : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase__ )
A : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), lowerCamelCase__, atol=1e-4 ) )
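# The expected feature-map sizes in the tests above follow the usual convolution
# arithmetic, floor((size + 2*pad - kernel) / stride) + 1 per stage. For the tester
# defaults (64 px input, kernels [7, 3, 3], strides [4, 2, 2], padding [2, 1, 1])
# this gives 64 -> 16 -> 8 -> 4 (editor's sketch):
from math import floor

def conv_out(size: int, kernel: int, stride: int, pad: int) -> int:
    return floor((size + 2 * pad - kernel) / stride) + 1

# conv_out(64, 7, 4, 2) == 16; conv_out(16, 3, 2, 1) == 8; conv_out(8, 3, 2, 1) == 4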
| 520 |
from __future__ import annotations
from math import gcd
def pollard_rho( num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
A : Optional[Any] = seed
A : List[Any] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
A : Any = rand_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : List[str] = rand_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Optional[Any] = rand_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
A : str = gcd(hare - tortoise , _lowerCAmelCase )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
A : Tuple = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
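

# Illustrative usage sketch (an addition, not part of the original script):
# ``pollard_rho`` can be chained to split a number into nontrivial divisors
# (the returned divisors are not necessarily prime). ``demo_factor`` is a
# helper name introduced here purely for demonstration.
def demo_factor(n: int) -> None:
    remaining = n
    parts = []
    while remaining > 1:
        divisor = pollard_rho(remaining, attempts=5)
        if divisor is None:
            # ``remaining`` is probably prime; keep it as the final part.
            parts.append(remaining)
            break
        parts.append(divisor)
        remaining //= divisor
    print(" * ".join(str(p) for p in parts))


# demo_factor(8051) would print something like "83 * 97" (divisor order may vary).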
if __name__ == "__main__":
import argparse
SCREAMING_SNAKE_CASE_:List[str] = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
SCREAMING_SNAKE_CASE_:Any = parser.parse_args()
SCREAMING_SNAKE_CASE_:int = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
SCREAMING_SNAKE_CASE_:Optional[int] = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 520 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip; the artifact URL redirects to the actual download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 38 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()

        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def canine_tokenizer(self) -> CanineTokenizer:
        return CanineTokenizer.from_pretrained("google/canine-s")
def _snake_case ( self , **lowercase ) -> CanineTokenizer:
lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
lowerCAmelCase = 1_024
return tokenizer
@require_torch
def _snake_case ( self ) -> str:
lowerCAmelCase = self.canine_tokenizer
lowerCAmelCase = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
lowerCAmelCase = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
# fmt: on
lowerCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
lowerCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.canine_tokenizer
lowerCAmelCase = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
lowerCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertIn("""token_type_ids""" , lowercase )
@require_torch
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.canine_tokenizer
lowerCAmelCase = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
lowerCAmelCase = tokenizer(
text_target=lowercase , max_length=32 , padding="""max_length""" , truncation=lowercase , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _snake_case ( self ) -> Tuple:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
lowerCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
lowerCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCAmelCase = chr(0XE_007 )
additional_special_tokens.append(lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
lowerCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
lowerCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn(lowercase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase , lowerCAmelCase = self.get_clean_sequence(lowercase )
# a special token for Canine can be defined as follows:
lowerCAmelCase = 0XE_005
lowerCAmelCase = chr(lowercase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(len(lowercase ) , 1 )
lowerCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(lowercase , input_encoded + special_token_id )
lowerCAmelCase = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
self.assertTrue(special_token not in decoded )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = chr(0XE_005 )
lowerCAmelCase = chr(0XE_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
lowerCAmelCase = tokenizer.tokenize(lowercase )
lowerCAmelCase = tokenizer.tokenize(lowercase )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(token_a[0] , lowercase )
self.assertEqual(token_a[0] , lowercase )
@require_tokenizers
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
lowerCAmelCase = 0XE_006
lowerCAmelCase = chr(lowercase )
lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowercase )
tokenizer.from_pretrained(lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase = json.load(lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase = json.load(lowercase )
# a special token for Canine can be defined as follows:
lowerCAmelCase = 0XE_006
lowerCAmelCase = chr(lowercase )
lowerCAmelCase = [new_token_a]
lowerCAmelCase = [new_token_a]
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase = tokenizer_class.from_pretrained(lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCAmelCase = 0XE_007
lowerCAmelCase = chr(lowercase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase = [AddedToken(lowercase , lstrip=lowercase )]
lowerCAmelCase = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = """hello world"""
if self.space_between_special_tokens:
lowerCAmelCase = """[CLS] hello world [SEP]"""
else:
lowerCAmelCase = input
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCAmelCase = tokenizer.decode(lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowercase , [output, output.lower()] )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCAmelCase = """a"""
lowerCAmelCase = ord(lowercase )
for attr in attributes_list:
setattr(lowercase , attr + """_id""" , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
setattr(lowercase , attr + """_id""" , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
setattr(lowercase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [] )
lowerCAmelCase = 0XE_006
lowerCAmelCase = chr(lowercase )
setattr(lowercase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def _snake_case ( self ) -> Tuple:
pass
def _snake_case ( self ) -> Optional[Any]:
pass
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> str:
pass
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Union[str, Any]:
pass
def _snake_case ( self ) -> Optional[Any]:
pass
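
    # Illustrative note (an addition, not part of the test suite): CANINE is
    # vocabulary-free; the tokenizer maps each character straight to its Unicode
    # code point, with the private-use code points 0xE000 (57344) and 0xE001
    # (57345) serving as [CLS] and [SEP], which is exactly what the expected ids
    # in the batch-integration test above assert.
    # tokenizer("hi").input_ids  # -> [57344, ord("h"), ord("i"), 57345]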
| 532 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 | '''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a= logging.get_logger(__name__)
a= '''▁'''
a= {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
a= {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
a= {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
a= ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
a= {'''mustc''': MUSTC_LANGS}
class __lowercase ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = MAX_MODEL_INPUT_SIZES
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE__ = []
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<unk>" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ):
__UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
__UpperCamelCase : Union[str, Any] = do_upper_case
__UpperCamelCase : Dict = do_lower_case
__UpperCamelCase : List[str] = load_json(_lowerCamelCase )
__UpperCamelCase : List[Any] = {v: k for k, v in self.encoder.items()}
__UpperCamelCase : int = spm_file
__UpperCamelCase : List[Any] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
__UpperCamelCase : Any = lang_codes
__UpperCamelCase : Any = LANGUAGES[lang_codes]
__UpperCamelCase : str = [f"""<lang:{lang}>""" for lang in self.langs]
__UpperCamelCase : List[str] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
__UpperCamelCase : str = self.lang_tokens
__UpperCamelCase : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__UpperCamelCase : Dict = {}
@property
def lowerCAmelCase ( self ):
return len(self.encoder )
@property
def lowerCAmelCase ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowerCAmelCase ( self , _lowerCamelCase ):
__UpperCamelCase : Optional[int] = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def lowerCAmelCase ( self , _lowerCamelCase ):
__UpperCamelCase : int = self.lang_code_to_id[tgt_lang]
__UpperCamelCase : List[str] = [lang_code_id]
def lowerCAmelCase ( self , _lowerCamelCase ):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def lowerCAmelCase ( self , _lowerCamelCase ):
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def lowerCAmelCase ( self , _lowerCamelCase ):
return self.decoder.get(_lowerCamelCase , self.unk_token )
def lowerCAmelCase ( self , _lowerCamelCase ):
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : List[str] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__UpperCamelCase : int = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__UpperCamelCase : int = []
else:
current_sub_tokens.append(_lowerCamelCase )
__UpperCamelCase : str = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
__UpperCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens )
__UpperCamelCase : Any = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def lowerCAmelCase ( self ):
__UpperCamelCase : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__UpperCamelCase : int = self.__dict__.copy()
__UpperCamelCase : Dict = None
return state
def __setstate__( self , _lowerCamelCase ):
__UpperCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase : Optional[int] = {}
__UpperCamelCase : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ):
__UpperCamelCase : List[str] = Path(_lowerCamelCase )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
__UpperCamelCase : Optional[int] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCamelCase , 'wb' ) as fi:
__UpperCamelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def _UpperCamelCase ( _a : str , _a : Dict[str, Any] ):
"""simple docstring"""
__UpperCamelCase : List[Any] = sentencepiece.SentencePieceProcessor(**_a )
spm.Load(str(_a ) )
return spm
def _UpperCamelCase ( _a : str ):
"""simple docstring"""
with open(_a , 'r' ) as f:
return json.load(_a )
def _UpperCamelCase ( _a : Any , _a : str ):
"""simple docstring"""
with open(_a , 'w' ) as f:
json.dump(_a , _a , indent=2 )
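

# Illustrative sketch (an addition, not part of the module): how the target
# language drives the decoder prefix in the tokenizer above (named
# Speech2TextTokenizer in the original transformers source). For a
# multilingual checkpoint with lang_codes set, assigning
# ``tokenizer.tgt_lang = "fr"`` resets ``prefix_tokens`` to the <lang:fr> id,
# so ``build_inputs_with_special_tokens([5, 6])`` returns
# [lang_code_to_id["fr"], 5, 6, eos_token_id].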
| 287 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'sentencepiece.bpe.model'}
a_ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
a_ = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
a_ = '▁'
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__lowercase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__lowercase : Optional[Any] = vocab_file
__lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
__lowercase : str = len(self.sp_model ) - 1
__lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase : int = [self.cls_token_id]
__lowercase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
__lowercase : Any = [self.sep_token_id]
__lowercase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]:
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ )
return spm_id if spm_id else self.unk_token_id
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> int:
__lowercase : Union[str, Any] = []
__lowercase : int = ''''''
__lowercase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__lowercase : List[str] = True
__lowercase : Optional[int] = []
else:
current_sub_tokens.append(UpperCamelCase_ )
__lowercase : int = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def __getstate__( self ) -> Any:
__lowercase : Optional[Any] = self.__dict__.copy()
__lowercase : Optional[int] = None
return state
def __setstate__( self , UpperCamelCase_ ) -> Tuple:
__lowercase : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowercase : Any = {}
__lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
__lowercase : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
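

# Illustrative sketch (an addition): the RoBERTa-style special-token layout the
# methods above implement, using the fairseq ids <s>=0 and </s>=2:
#   single sequence: <s> A </s>            i.e. [0] + ids_a + [2]
#   pair:            <s> A </s></s> B </s> i.e. [0] + ids_a + [2, 2] + ids_b + [2]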
| 76 | """simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
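

# Illustrative usage sketch (an addition, not part of the original module):
# solving dy/dx = y with y(0) = 1 approximates e**x; the global error of the
# explicit Euler method is O(step_size).
def _exponential_growth(x: float, y: float) -> float:
    return y


# explicit_euler(_exponential_growth, 1.0, 0.0, 0.01, 1.0)[-1] is roughly
# 2.7048, versus the exact value e = 2.71828...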
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = '▁'
lowercase_ = {'vocab_file': 'prophetnet.tokenizer'}
lowercase_ = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
lowercase_ = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
lowercase_ = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = collections.OrderedDict()
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
__A = reader.readlines()
for index, token in enumerate(__UpperCamelCase ):
__A = token.rstrip('''\n''' )
__A = index
return vocab
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Tuple = VOCAB_FILES_NAMES
A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Union[str, Any]="[SEP]", _lowerCamelCase : Tuple="[SEP]", _lowerCamelCase : List[Any]="[SEP]", _lowerCamelCase : Tuple="[UNK]", _lowerCamelCase : Union[str, Any]="[PAD]", _lowerCamelCase : List[Any]="[CLS]", _lowerCamelCase : Tuple="[MASK]", _lowerCamelCase : Optional[Dict[str, Any]] = None, **_lowerCamelCase : Optional[Any], ):
'''simple docstring'''
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, sep_token=_lowerCamelCase, unk_token=_lowerCamelCase, pad_token=_lowerCamelCase, cls_token=_lowerCamelCase, mask_token=_lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **_lowerCamelCase, )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
__A = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__A = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
__A = f'[unused{i}]'
__A = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__A = 12
__A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_lowerCamelCase )
def __getstate__( self : List[str] ):
'''simple docstring'''
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Dict, _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return ([0] * len(_lowerCamelCase )) + [1]
return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1]
def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__A = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(_lowerCamelCase, out_type=_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__A = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[str] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Dict ):
'''simple docstring'''
__A = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase, ''' ''' ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__A = os.path.join(
_lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase, '''wb''' ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__A = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
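

# Illustrative sketch (an addition, not part of the module): the id bookkeeping
# above in isolation. With ``fairseq_offset = 12``, a regular SentencePiece id
# maps to the model-side id ``spm_id + 12`` and is inverted with
# ``index - 12``, so the first "real" spm piece ("," at spm position 3) lands
# at model position 15, after [PAD], [CLS], [SEP], [UNK], [MASK] and the ten
# [unused] slots.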
| 710 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 215 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
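
# Illustrative note (an addition): the shape contract asserted above, spelled
# out with the tester defaults (batch_size=13, num_channels=3, size 18x18):
# a single image encodes to pixel_values of shape (1, 3, 18, 18) and a full
# batch to (13, 3, 18, 18), independent of the input resolution, because
# do_resize is True.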
| 567 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
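
    # Illustrative sketch (an addition): what the _LazyModule indirection above
    # provides. Importing the package binds a cheap proxy module, and heavy
    # submodules (e.g. modeling_mega, which pulls in torch) are only imported
    # when one of their names is first accessed:
    #
    #     import transformers.models.mega as mega  # cheap
    #     mega.MegaModel  # triggers the torch-backed import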
| 567 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
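

# Illustrative sketch (an addition, not part of the original script): the
# state-dict surgery above in isolation. DialoGPT's original checkpoints name
# the LM head "lm_head.decoder.weight", while the Hugging Face GPT-2 port
# expects "lm_head.weight":
#
#     d = {"lm_head.decoder.weight": tensor, ...}
#     d["lm_head.weight"] = d.pop("lm_head.decoder.weight")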
| 605 | '''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Get image rotation/shear by the affine transform mapping pt1 onto pt2."""
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
__lowercase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
__lowercase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowercase , __lowercase = gray_img.shape
# set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
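
    # Illustrative sketch (an addition, not part of the original script):
    # cva.getAffineTransform takes exactly three source/destination point pairs
    # and returns the unique 2x3 affine matrix mapping one triangle onto the
    # other, so mapping a triangle onto itself yields (approximately) the
    # identity transform:
    #
    #     cva.getAffineTransform(pts1, pts1)  # ~ [[1, 0, 0], [0, 1, 0]]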
| 605 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dictionary of worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'{key}\n{value}\n')
| 680 |
def multiplicative_persistence(num: int) -> int:
    """Return the persistence of ``num`` under repeated digit multiplication."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the persistence of ``num`` under repeated digit summation."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
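

# Illustrative examples (an addition, not part of the original module):
# 39 -> 27 -> 14 -> 4 takes 3 multiplicative steps, and 199 -> 19 -> 10 -> 1
# takes 3 additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3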
if __name__ == "__main__":
import doctest
doctest.testmod()
| 491 | 0 |
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to and including ``n``."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
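# Sanity check (illustrative, not part of the original file): sieve(10**6) returns
# 78498 primes, i.e. pi(10^6), starting [2, 3, 5, 7, 11, ...]. The segmented approach
# only keeps O(sqrt(n)) booleans per segment instead of one flag per number up to n.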
| 721 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(1_25.50, 0.05) = }")
| 430 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : Dict = 10 , lowercase : str = 22 ) -> int:
"""simple docstring"""
snake_case : Tuple = range(1 , lowercase )
snake_case : Tuple = range(1 , lowercase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''')
| 178 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
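# Illustrative invocation (path assumed from the usual transformers checkout layout,
# not stated in the original file):
#   python -m pytest tests/pipelines/test_pipelines_zero_shot_image_classification.py -k small_model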
| 678 | 0 |
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: for each position, the length of the
    longest proper prefix of the string that is also a suffix ending there."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest prefix that reoccurs later in the string."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
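# Illustrative usage (not part of the original file):
#   prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") == 4  # the prefix "aabc" reoccurs at the end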
| 714 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
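# Illustrative usage (requires Hub access; not part of the original file):
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("Long document to summarize.", return_tensors="pt")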
| 37 | 0 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    Smaller row groups optimize random access, since reading one row requires reading its
    entire row group.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset's Arrow table to a binary file handle as Parquet,
        batch by batch, and returns the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
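# Illustrative usage (hypothetical file name; not part of the original file):
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ParquetDatasetWriter(ds, "out.parquet").write()  # returns the number of bytes written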
| 154 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self,
        vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1,
        attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512,
        embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0,
        eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True,
        summary_type="first", summary_use_proj=True, summary_activation=None,
        summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5,
        mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
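# Note (illustrative, not part of the original file): thanks to `attribute_map` above,
# XLMConfig(n_layers=6) can also be read back as `config.num_hidden_layers`, so the
# generic PretrainedConfig API keeps working with XLM's own parameter names.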
| 154 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 368 |
"""Top-down (memoized) dynamic-programming edit distance between two words."""
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
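# Illustrative check (not part of the original file): the classic Levenshtein example
#   min_distance_up_bottom("intention", "execution") == 5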
| 368 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 550 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16,
        num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
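# Note (illustrative, not part of the original file): DeiTOnnxConfig declares a single
# `pixel_values` input whose batch/channel/height/width axes are all dynamic, and
# validates ONNX exports against the PyTorch model with an absolute tolerance of 1e-4.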
| 550 | 1 |
"""Image/Text processor class for CLIP."""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare text(s) and/or image(s) for the model; when both are given,
        the image pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
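# Illustrative usage (requires Hub access; not part of the original file):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")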
| 8 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to ``digit_amount`` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
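# Descriptive note (not part of the original file): the _LazyModule installed above
# defers the actual import of Wav2Vec2PhonemeCTCTokenizer until the attribute is
# first accessed, keeping `import transformers` fast.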
| 484 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: speed of sound in a fluid is sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
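# Illustrative check (not part of the original file): for water, density ~ 998 kg/m^3
# and bulk modulus ~ 2.15e9 Pa give speed_of_sound_in_a_fluid(998, 2.15e9) ~ 1467 m/s.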
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 613 | 0 |
"""Project Euler problem 234: sum of semidivisible numbers below the limit."""
import math


def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes returning all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
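# Context (illustrative, not part of the original file): the limit 999_966_663_333
# matches Project Euler problem 234 ("semidivisible numbers"); the loop walks
# consecutive prime squares and sums the numbers in between that are divisible by
# exactly one of the two bracketing primes.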
if __name__ == "__main__":
    print(solution())
| 716 |
"""Tokenization classes for Ernie-M."""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer, based on SentencePiece."""

    # Ernie-M models do not use token_type embeddings.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
        mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with SentencePiece, then re-split pieces at CJK
        characters, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(piece)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """Whether the character is a CJK unified ideograph."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """Whether the character is an ASCII letter."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """Whether the character is a punctuation mark handled by the tokenizer."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """Whether the character counts as whitespace."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
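# Illustrative usage (hypothetical local checkpoint path; not part of the original file):
#   tokenizer = ErnieMTokenizer(sentencepiece_model_ckpt="sentencepiece.bpe.model")
#   ids = tokenizer("Hello world")["input_ids"]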
| 609 | 0 |
"""Testing suite for the PyTorch MobileViT model."""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640,
        num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1,
        initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_UpperCamelCase = model.to(lowerCAmelCase__ )
_UpperCamelCase = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
_UpperCamelCase = outputs.logits.detach().cpu()
_UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(50, 60)] )
_UpperCamelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
_UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ )
_UpperCamelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
| 98 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
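# Fine-tuning entry point for seq2seq tasks (summarization / translation): parses
# the dataclass arguments below, builds train/val/test datasets, and drives
# training, evaluation and prediction through the custom Seq2SeqTrainer.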
__A : str = logging.getLogger(__name__)
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowercase = field(
default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowercase = field(
default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
lowercase = field(
default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether to freeze the encoder.'})
lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether to freeze the embeddings.'})
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
lowercase = field(
default='summarization' ,metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} ,)
lowercase = field(
default=10_24 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase = field(
default=1_28 ,metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase = field(
default=1_42 ,metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} ,)
lowercase = field(
default=1_42 ,metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase = field(default=-1 ,metadata={'help': '# training examples. -1 means use all.'})
lowercase = field(default=-1 ,metadata={'help': '# validation examples. -1 means use all.'})
lowercase = field(default=-1 ,metadata={'help': '# test examples. -1 means use all.'})
lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Source language id for translation.'})
lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Target language id for translation.'})
lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': '# num_beams to use for evaluation.'})
lowercase = field(
default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} ,)
def UpperCamelCase_ ( A__ : Tuple , A__ : List[str] , A__ : str ):
'''simple docstring'''
logger.info(f'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(f' {key} = {metrics[key]}' )
save_json(A__ , os.path.join(A__ , f'{split}_results.json' ) )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = parser.parse_args_into_dataclasses()
check_output_dir(A__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , A__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : List[Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(A__ , A__ , A__ ):
assert hasattr(A__ , A__ ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(A__ , A__ , getattr(A__ , A__ ) )
lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=A__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(A__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowerCAmelCase_ : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(A__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(A__ , A__ ):
lowerCAmelCase_ : List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowerCAmelCase_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(A__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowerCAmelCase_ : Any = SeqaSeqDataset
# Get datasets
lowerCAmelCase_ : List[str] = (
dataset_class(
A__ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowerCAmelCase_ : List[Any] = (
dataset_class(
A__ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowerCAmelCase_ : List[Any] = (
dataset_class(
A__ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowerCAmelCase_ : Dict = (
build_compute_metrics_fn(data_args.task , A__ ) if training_args.predict_with_generate else None
)
lowerCAmelCase_ : str = SeqaSeqTrainer(
model=A__ , args=A__ , data_args=A__ , train_dataset=A__ , eval_dataset=A__ , data_collator=SeqaSeqDataCollator(
A__ , A__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=A__ , tokenizer=A__ , )
lowerCAmelCase_ : Dict = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowerCAmelCase_ : Optional[int] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowerCAmelCase_ : Union[str, Any] = train_result.metrics
lowerCAmelCase_ : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , A__ , training_args.output_dir )
all_metrics.update(A__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ : Any = trainer.evaluate(metric_key_prefix="""val""" )
lowerCAmelCase_ : Optional[int] = data_args.n_val
lowerCAmelCase_ : Any = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowerCAmelCase_ : Union[str, Any] = trainer.predict(test_dataset=A__ , metric_key_prefix="""test""" )
lowerCAmelCase_ : Optional[int] = test_output.metrics
lowerCAmelCase_ : List[Any] = data_args.n_test
if trainer.is_world_process_zero():
lowerCAmelCase_ : int = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.predict_with_generate:
lowerCAmelCase_ : int = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
lowerCAmelCase_ : List[Any] = lmap(str.strip , A__ )
write_txt_file(A__ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(A__ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def UpperCamelCase_ ( A__ : Optional[int] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 275 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
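# Lazy import structure for DeiT: the heavy torch / TensorFlow / vision modules
# referenced below are only imported on first attribute access, which keeps
# `import transformers` cheap when those backends are absent.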
__SCREAMING_SNAKE_CASE = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ['DeiTFeatureExtractor']
__SCREAMING_SNAKE_CASE = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 340 |
'''simple docstring'''
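# Dynamic-programming minimum path sum: each cell accumulates the cheapest cost
# of reaching it moving only right or down, so the answer ends up in
# grid[-1][-1]; the second helper folds one row's costs into the next.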
def __a ( lowerCAmelCase__ : list ):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
a__ : List[str] = grid[0]
for row_n in range(1 , len(lowerCAmelCase__ ) ):
a__ : Tuple = grid[row_n]
a__ : Union[str, Any] = fill_row(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[Any] = grid[row_n]
return grid[-1][-1]
def __a ( lowerCAmelCase__ : list , lowerCAmelCase__ : list ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(lowerCAmelCase__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 | 1 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
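# MarkupLMFeatureExtractor uses BeautifulSoup (bs4) to turn raw HTML into node
# strings paired with their XPath expressions; the tests below compare both
# outputs against hand-written references for single and batched inputs.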
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any] ):
A_ = parent
def __A ( self : List[str] ):
return {}
def __snake_case ( ):
"""simple docstring"""
A_ = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
A_ = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bsa
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = MarkupLMFeatureExtractor if is_bsa_available() else None
def __A ( self : int ):
A_ = MarkupLMFeatureExtractionTester(self )
@property
def __A ( self : List[str] ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def __A ( self : Tuple ):
# Initialize feature_extractor
A_ = self.feature_extraction_class()
# Test not batched input
A_ = get_html_strings()[0]
A_ = feature_extractor(UpperCAmelCase )
# fmt: off
A_ = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
A_ = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , UpperCAmelCase )
self.assertEqual(encoding.xpaths , UpperCAmelCase )
# Test batched
A_ = get_html_strings()
A_ = feature_extractor(UpperCAmelCase )
# fmt: off
A_ = expected_nodes + [["My First Heading", "My first paragraph."]]
A_ = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCAmelCase )
self.assertEqual(encoding.xpaths , UpperCAmelCase )
| 86 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
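# Flax building blocks used by diffusion UNets: nearest-neighbour upsampling
# followed by a conv, strided-conv downsampling, and a ResNet block conditioned
# on a time embedding (with an optional 1x1 shortcut when channel counts change).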
class UpperCamelCase_ ( nn.Module ):
_A : int
_A : jnp.dtype = jnp.floataa
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = hidden_states.shape
UpperCAmelCase = jax.image.resize(
snake_case__ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
UpperCAmelCase = self.conv(snake_case__ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
_A : int
_A : jnp.dtype = jnp.floataa
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case__ ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.conv(snake_case__ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
_A : int
_A : int = None
_A : float = 0.0
_A : bool = None
_A : jnp.dtype = jnp.floataa
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.in_channels if self.out_channels is None else self.out_channels
UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
UpperCAmelCase = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase = nn.Dense(snake_case__ , dtype=self.dtype )
UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
UpperCAmelCase = nn.Dropout(self.dropout_prob )
UpperCAmelCase = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
UpperCAmelCase = None
if use_nin_shortcut:
UpperCAmelCase = nn.Conv(
snake_case__ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , snake_case__ , snake_case__ , snake_case__=True ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = hidden_states
UpperCAmelCase = self.norma(snake_case__ )
UpperCAmelCase = nn.swish(snake_case__ )
UpperCAmelCase = self.conva(snake_case__ )
UpperCAmelCase = self.time_emb_proj(nn.swish(snake_case__ ) )
UpperCAmelCase = jnp.expand_dims(jnp.expand_dims(snake_case__ , 1 ) , 1 )
UpperCAmelCase = hidden_states + temb
UpperCAmelCase = self.norma(snake_case__ )
UpperCAmelCase = nn.swish(snake_case__ )
UpperCAmelCase = self.dropout(snake_case__ , snake_case__ )
UpperCAmelCase = self.conva(snake_case__ )
if self.conv_shortcut is not None:
UpperCAmelCase = self.conv_shortcut(snake_case__ )
return hidden_states + residual
| 673 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
with open(__a ) as metadata_file:
lowercase__ : List[Any] = json.load(__a )
lowercase__ : Dict = LukeConfig(use_entity_aware_attention=__a , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowercase__ : Optional[Any] = torch.load(__a , map_location="""cpu""" )
# Load the entity vocab file
lowercase__ : Optional[Any] = load_entity_vocab(__a )
lowercase__ : Dict = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ : List[str] = AddedToken("""<ent>""" , lstrip=__a , rstrip=__a )
lowercase__ : int = AddedToken("""<ent2>""" , lstrip=__a , rstrip=__a )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__a , __a )
lowercase__ : Optional[Any] = LukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
lowercase__ : List[str] = state_dict["""embeddings.word_embeddings.weight"""]
lowercase__ : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
lowercase__ : Dict = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
lowercase__ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ : str = F'encoder.layer.{layer_index}.attention.self.'
lowercase__ : Optional[int] = state_dict[prefix + matrix_name]
lowercase__ : Any = state_dict[prefix + matrix_name]
lowercase__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ : Tuple = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowercase__ : Union[str, Any] = entity_emb[entity_vocab["""[MASK]"""]]
lowercase__ : Tuple = LukeModel(config=__a ).eval()
lowercase__ , lowercase__ : Optional[int] = model.load_state_dict(__a , strict=__a )
if not (len(__a ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(__a )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
lowercase__ : List[Any] = LukeTokenizer.from_pretrained(__a , task="""entity_classification""" )
lowercase__ : Tuple = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
lowercase__ : Union[str, Any] = (39, 42)
lowercase__ : Union[str, Any] = tokenizer(__a , entity_spans=[span] , add_prefix_space=__a , return_tensors="""pt""" )
lowercase__ : Tuple = model(**__a )
# Verify word hidden states
if model_size == "large":
lowercase__ : List[str] = torch.Size((1, 42, 10_24) )
lowercase__ : Optional[Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
lowercase__ : int = torch.Size((1, 42, 7_68) )
lowercase__ : List[str] = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowercase__ : Dict = torch.Size((1, 1, 10_24) )
lowercase__ : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
lowercase__ : Union[str, Any] = torch.Size((1, 1, 7_68) )
lowercase__ : Dict = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __a , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__a ) )
model.save_pretrained(__a )
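# Parses a tab-separated entity vocabulary file (one "<entity>\t<count>" row per
# line) and maps each entity name to its line index.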
def UpperCamelCase ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : Tuple = {}
with open(__a , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(__a ):
lowercase__ , lowercase__ : Dict = line.rstrip().split("""\t""" )
lowercase__ : Tuple = index
return entity_vocab
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCamelCase__ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 720 |
from ..utils import DummyObject, requires_backends
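# Auto-generated dummy objects: each placeholder class mirrors a Flax pipeline's
# name and raises an informative error via requires_backends when the flax
# backend is not installed.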
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : int = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Optional[Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Tuple = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : List[str] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Optional[Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Dict = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Optional[Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : Optional[Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
class _snake_case ( metaclass=UpperCAmelCase_ ):
__lowerCAmelCase : List[Any] = ['flax']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(self , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
@classmethod
def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
requires_backends(cls , ["""flax"""])
| 495 | 0 |
'''simple docstring'''
import numpy as np
__snake_case : Optional[Any] = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
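# Bifid-style cipher built on the 5x5 Polybius square above: letters map to
# 1-based (row, column) coordinates, the coordinate streams are rearranged and
# read back as letters; "j" is folded into "i" so the alphabet fits 25 cells.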
class A :
def __init__( self ) -> None:
_a = np.array(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> np.ndarray:
_a , _a = np.where(letter == self.SQUARE )
_a = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> str:
_a = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __lowerCAmelCase ( self , snake_case_ ) -> str:
_a = message.lower()
_a = message.replace(" " , "" )
_a = message.replace("j" , "i" )
_a = np.empty((2, len(snake_case_ )) )
for letter_index in range(len(snake_case_ ) ):
_a = self.letter_to_numbers(message[letter_index] )
_a = numbers[0]
_a = numbers[1]
_a = first_step.reshape(2 * len(snake_case_ ) )
_a = ""
for numbers_index in range(len(snake_case_ ) ):
_a = int(second_step[numbers_index * 2] )
_a = int(second_step[(numbers_index * 2) + 1] )
_a = self.numbers_to_letter(snake_case_ , snake_case_ )
_a = encoded_message + letter
return encoded_message
def __lowerCAmelCase ( self , snake_case_ ) -> str:
_a = message.lower()
_a = _a.replace(" " , "" )  # str.replace returns a new string, so the result must be reassigned
_a = np.empty(2 * len(snake_case_ ) )
for letter_index in range(len(snake_case_ ) ):
_a = self.letter_to_numbers(message[letter_index] )
_a = numbers[0]
_a = numbers[1]
_a = first_step.reshape((2, len(snake_case_ )) )
_a = ""
for numbers_index in range(len(snake_case_ ) ):
_a = int(second_step[0, numbers_index] )
_a = int(second_step[1, numbers_index] )
_a = self.numbers_to_letter(snake_case_ , snake_case_ )
_a = decoded_message + letter
return decoded_message
| 131 |
'''simple docstring'''
from __future__ import annotations
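# Project Euler 36: sum all numbers below the limit that are palindromic in both
# base 10 and base 2 (leading zeros are excluded, since bin() never emits them).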
def _lowercase ( lowerCamelCase__ : int | str ):
_a = str(lowerCamelCase__ )
return n == n[::-1]
def _lowercase ( lowerCamelCase__ : int = 1_000_000 ):
_a = 0
for i in range(1, lowerCamelCase__ ):
if is_palindrome(lowerCamelCase__ ) and is_palindrome(bin(lowerCamelCase__ ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 131 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
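# MRA (Multi-resolution Analysis attention) model configuration: BERT-style
# hyperparameters plus the MRA-specific block / approximation settings assigned
# at the bottom of __init__.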
class a__ ( UpperCamelCase_ ):
snake_case__ = '''mra'''
def __init__( self : str ,a__ : List[Any]=5_0265 ,a__ : Optional[Any]=768 ,a__ : List[Any]=12 ,a__ : Any=12 ,a__ : List[Any]=3072 ,a__ : Optional[Any]="gelu" ,a__ : int=0.1 ,a__ : Any=0.1 ,a__ : List[Any]=512 ,a__ : Dict=1 ,a__ : List[str]=0.02 ,a__ : List[Any]=1E-5 ,a__ : Union[str, Any]="absolute" ,a__ : int=4 ,a__ : Dict="full" ,a__ : Dict=0 ,a__ : Optional[Any]=0 ,a__ : Tuple=1 ,a__ : str=0 ,a__ : List[Any]=2 ,**a__ : Optional[int] ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,**a__)
_lowerCAmelCase:List[str] = vocab_size
_lowerCAmelCase:Optional[int] = max_position_embeddings
_lowerCAmelCase:List[Any] = hidden_size
_lowerCAmelCase:List[str] = num_hidden_layers
_lowerCAmelCase:List[Any] = num_attention_heads
_lowerCAmelCase:str = intermediate_size
_lowerCAmelCase:Optional[int] = hidden_act
_lowerCAmelCase:Tuple = hidden_dropout_prob
_lowerCAmelCase:Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase:Tuple = initializer_range
_lowerCAmelCase:Tuple = type_vocab_size
_lowerCAmelCase:List[str] = layer_norm_eps
_lowerCAmelCase:List[str] = position_embedding_type
_lowerCAmelCase:Optional[Any] = block_per_row
_lowerCAmelCase:Dict = approx_mode
_lowerCAmelCase:Optional[Any] = initial_prior_first_n_blocks
_lowerCAmelCase:int = initial_prior_diagonal_n_blocks
| 705 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
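# Converts an mmsegmentation UperNet-ConvNeXt checkpoint to the Hugging Face
# layout: derive the config from the model name, rename the state-dict keys,
# then verify the logits on an ADE20k sample image before saving.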
def UpperCAmelCase ( snake_case : Tuple ):
_lowerCAmelCase:Tuple = 384
if "tiny" in model_name:
_lowerCAmelCase:Any = [3, 3, 9, 3]
_lowerCAmelCase:Union[str, Any] = [96, 192, 384, 768]
if "small" in model_name:
_lowerCAmelCase:List[str] = [3, 3, 27, 3]
_lowerCAmelCase:Any = [96, 192, 384, 768]
if "base" in model_name:
_lowerCAmelCase:int = [3, 3, 27, 3]
_lowerCAmelCase:int = [128, 256, 512, 1024]
_lowerCAmelCase:Any = 512
if "large" in model_name:
_lowerCAmelCase:Optional[int] = [3, 3, 27, 3]
_lowerCAmelCase:List[str] = [192, 384, 768, 1536]
_lowerCAmelCase:List[Any] = 768
if "xlarge" in model_name:
_lowerCAmelCase:str = [3, 3, 27, 3]
_lowerCAmelCase:Optional[int] = [256, 512, 1024, 2048]
_lowerCAmelCase:Optional[int] = 1024
# set label information
_lowerCAmelCase:Tuple = 150
_lowerCAmelCase:int = '''huggingface/label-files'''
_lowerCAmelCase:Optional[int] = '''ade20k-id2label.json'''
_lowerCAmelCase:List[str] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
_lowerCAmelCase:Dict = {int(snake_case ): v for k, v in idalabel.items()}
_lowerCAmelCase:Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase:str = ConvNextConfig(
depths=snake_case , hidden_sizes=snake_case , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_lowerCAmelCase:Union[str, Any] = UperNetConfig(
backbone_config=snake_case , auxiliary_in_channels=snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case , )
return config
def UpperCAmelCase ( snake_case : List[Any] ):
_lowerCAmelCase:List[str] = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def UpperCAmelCase ( snake_case : int , snake_case : Tuple , snake_case : Optional[int] ):
_lowerCAmelCase:int = dct.pop(snake_case )
_lowerCAmelCase:Union[str, Any] = val
def UpperCAmelCase ( snake_case : List[str] , snake_case : int , snake_case : Optional[Any] ):
_lowerCAmelCase:Dict = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
_lowerCAmelCase:Optional[Any] = model_name_to_url[model_name]
_lowerCAmelCase:Optional[Any] = torch.hub.load_state_dict_from_url(snake_case , map_location='''cpu''' )['''state_dict''']
_lowerCAmelCase:Tuple = get_upernet_config(snake_case )
_lowerCAmelCase:Union[str, Any] = UperNetForSemanticSegmentation(snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase:str = state_dict.pop(snake_case )
if "bn" in key:
_lowerCAmelCase:str = key.replace('''bn''' , '''batch_norm''' )
_lowerCAmelCase:Optional[Any] = val
# rename keys
_lowerCAmelCase:int = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
model.load_state_dict(snake_case )
# verify on image
_lowerCAmelCase:Optional[int] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
_lowerCAmelCase:Optional[int] = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('''RGB''' )
_lowerCAmelCase:int = SegformerImageProcessor()
_lowerCAmelCase:Optional[int] = processor(snake_case , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
_lowerCAmelCase:str = model(snake_case )
if model_name == "upernet-convnext-tiny":
_lowerCAmelCase:Union[str, Any] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
_lowerCAmelCase:List[str] = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
_lowerCAmelCase:Dict = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
_lowerCAmelCase:Union[str, Any] = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
_lowerCAmelCase:List[str] = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 439 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
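# Playfair cipher helpers: generate_table builds the 5x5 key square (I and J
# share a cell), prepare_input pads the message into digraphs, and the
# encode/decode functions apply the row / column / rectangle rules pair by pair.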
def _snake_case ( lowerCAmelCase : Iterable[str] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = iter(lowerCAmelCase )
while True:
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(itertools.islice(lowerCAmelCase , lowerCAmelCase ) )
if not chunk:
return
yield chunk
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ""
if len(lowerCAmelCase ) < 2:
return dirty
for i in range(len(lowerCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(lowerCAmelCase ) & 1:
clean += "X"
return clean
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
SCREAMING_SNAKE_CASE_ : Tuple = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(lowerCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(lowerCAmelCase )
return table
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = generate_table(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = prepare_input(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCAmelCase , 2 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = divmod(table.index(lowerCAmelCase ) , 5 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(table.index(lowerCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = generate_table(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCAmelCase , 2 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = divmod(table.index(lowerCAmelCase ) , 5 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = divmod(table.index(lowerCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 216 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Optional[Any] = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 216 | 1 |
'''simple docstring'''
import os
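# Project Euler 22: read and sort the names file, then sum each name's
# alphabetical value multiplied by its 1-based position in the sorted list.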
def _lowerCAmelCase ( ):
with open(os.path.dirname(lowerCamelCase_ ) + '''/p022_names.txt''' ) as file:
__lowercase = str(file.readlines()[0] )
__lowercase = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
__lowercase = 0
__lowercase = 0
for i, name in enumerate(lowerCamelCase_ ):
for letter in name:
name_score += ord(lowerCamelCase_ ) - 6_4
total_score += (i + 1) * name_score
__lowercase = 0
return total_score
if __name__ == "__main__":
print(solution())
| 56 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56 | 1 |
def lowerCAmelCase_ ( _snake_case : int ) -> bool:
'''simple docstring'''
return str(_snake_case ) == str(_snake_case )[::-1]
def lowerCAmelCase_ ( _snake_case : int ) -> int:
'''simple docstring'''
return int(_snake_case ) + int(str(_snake_case )[::-1] )
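# Project Euler 55: a number is treated as a Lychrel candidate if 50
# reverse-and-add iterations never reach a palindrome; the while/else below
# records a number only when the loop finishes without hitting the break.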
def lowerCAmelCase_ ( _snake_case : int = 10000 ) -> int:
'''simple docstring'''
__magic_name__ : str = []
for num in range(1 , _snake_case ):
__magic_name__ : Optional[int] = 0
__magic_name__ : Tuple = num
while iterations < 50:
__magic_name__ : List[str] = sum_reverse(_snake_case )
iterations += 1
if is_palindrome(_snake_case ):
break
else:
lychrel_nums.append(_snake_case )
return len(_snake_case )
if __name__ == "__main__":
print(F"{solution() = }")
| 124 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
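# Model tester scaffold for XLM: builds a tiny random config plus inputs, then
# checks output shapes and losses for the base model and each task head
# (LM head, simple and cls-based QA, classification).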
class _snake_case :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=False , _a=False , _a=False , _a=2 , _a=99 , _a=0 , _a=32 , _a=5 , _a=4 , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=2 , _a=4 , _a="last" , _a=True , _a=None , _a=0 , ):
__magic_name__ : List[Any] = parent
__magic_name__ : Optional[int] = batch_size
__magic_name__ : Optional[Any] = seq_length
__magic_name__ : Tuple = is_training
__magic_name__ : str = use_input_lengths
__magic_name__ : List[str] = use_token_type_ids
__magic_name__ : Optional[Any] = use_labels
__magic_name__ : Union[str, Any] = gelu_activation
__magic_name__ : Optional[int] = sinusoidal_embeddings
__magic_name__ : Tuple = causal
__magic_name__ : Tuple = asm
__magic_name__ : Dict = n_langs
__magic_name__ : Tuple = vocab_size
__magic_name__ : Tuple = n_special
__magic_name__ : Optional[Any] = hidden_size
__magic_name__ : List[str] = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = hidden_dropout_prob
__magic_name__ : Tuple = attention_probs_dropout_prob
__magic_name__ : Any = max_position_embeddings
__magic_name__ : str = type_sequence_label_size
__magic_name__ : int = initializer_range
__magic_name__ : Optional[int] = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : int = summary_type
__magic_name__ : int = use_proj
__magic_name__ : Any = scope
__magic_name__ : Optional[Any] = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 124 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=SCREAMING_SNAKE_CASE_ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=SCREAMING_SNAKE_CASE_ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=SCREAMING_SNAKE_CASE_ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=SCREAMING_SNAKE_CASE_ , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=SCREAMING_SNAKE_CASE_ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=SCREAMING_SNAKE_CASE_ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=SCREAMING_SNAKE_CASE_ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])
    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
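    # Illustration of the chunking above (hypothetical values): with max_length=4,
    # tokenized samples [1, 2, 3] and [4, 5, 6, 7, 8] are concatenated to [1, ..., 8]
    # and re-split into [1, 2, 3, 4] and [5, 6, 7, 8]; a remainder shorter than 4 is dropped.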
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
print("Wrote file {} containing {} records".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shard_count += 1
total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
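    # Example invocation (illustrative only; the script name and paths are placeholders):
    # python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
    #     --split train --shard_size 1000 --max_length 512 --output_dir tf-tpu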
| 489 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
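# Note on the pattern above (explanatory comment, not part of the original file):
# each try/except OptionalDependencyNotAvailable block registers a submodule in
# _import_structure only when its backend (sentencepiece, torch, flax, tf) is
# installed, and _LazyModule defers the actual imports until first attribute access:
#     from transformers.models.xglm import XGLMConfig  # heavy import happens here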
| 489 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__a: Dict = logging.get_logger(__name__)
__a: Any = '''Hello, World!'''
__a: List[Any] = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"), )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__a: Union[str, Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 108 | import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
http_head('''https://huggingface.co''' )
| 604 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with y(x0) = y0 on [x0, x_end]
    using the forward (explicit) Euler method with the given step size."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
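    # Quick check (illustrative): integrate y' = y with y(0) = 1 up to x = 1.
    # The exact value is e ≈ 2.71828; explicit Euler with step 0.01 gives ≈ 2.7048.
    print(explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])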
| 709 | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self ):
return self.pa_type
    def encode_example(self, value):
        """Encode ``value`` (str path, bytes, numpy array, PIL image or dict) into a ``{"bytes", "path"}`` dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value, token_per_repo_id=None):
        """Decode a ``{"bytes", "path"}`` dict into a ``PIL.Image.Image``."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage(self, storage):
        """Cast an Arrow array of strings, binaries, structs or lists to the Image storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        """Embed image bytes into the Arrow storage, reading from the file paths when needed."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( __a : "PIL.Image.Image" ) -> bytes:
_lowercase =BytesIO()
if image.format in list_image_compression_formats():
_lowercase =image.format
else:
_lowercase ="PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__a , format=__a )
return buffer.getvalue()
def __lowerCamelCase ( __a : "PIL.Image.Image" ) -> dict:
if hasattr(__a , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__a )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 594 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lata: float, lona: float, latb: float, lonb: float) -> float:
    """Approximate the surface distance in meters between two points on the
    ellipsoidal Earth using Lambert's formula."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lata = atan((1 - flattening) * tan(radians(lata)))
    b_latb = atan((1 - flattening) * tan(radians(latb)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lata, lona, latb, lonb) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lata + b_latb) / 2
    q_value = (b_latb - b_lata) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
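    # Illustrative usage (values approximate): San Francisco (37.774856, -122.424227)
    # to New York (40.713019, -74.012647) should come out roughly 4.1e6 meters.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647))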
| 221 | import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
a_ = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
a_ = F"https://www.google.com/search?q={query}&num=100"
a_ = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 221 | 1 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Return the pairwise cosine-similarity matrix between two embedding batches."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
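# For batches of shapes (N, D) and (M, D), the result is the (N, M) matrix of
# cosine similarities; e.g. two identical embeddings score 1.0 after normalization.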
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
return images, has_nsfw_concepts
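    # Example of the scoring rule above (illustrative numbers): with cosine similarity
    # 0.81 against a concept whose threshold is 0.80, the score is
    # round(0.81 - 0.80 + adjustment, 3); any positive score flags the image, and a
    # triggered "special care" concept raises adjustment to 0.01 so the remaining
    # concepts are checked more strictly.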
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
return images, has_nsfw_concepts
| 701 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 5_1_2,
"squeezebert/squeezebert-mnli": 5_1_2,
"squeezebert/squeezebert-mnli-headless": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
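    # For example, encoding the pair ("A", "B") yields token type ids of the form
    # [0, 0, 0, 1, 1]: zeros cover "[CLS] A [SEP]" and ones cover "B [SEP]".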
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 638 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]], dtype=tf.float32, )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 638 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after validating that 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the Lorentz transformation for the given velocity to an event
    four-vector (symbolic [ct, x, y, z] when no event is given)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
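# Worked numeric check: for v = 0.6c, beta = 0.6 and gamma = 1 / sqrt(1 - 0.36) = 1.25,
# so the upper-left 2x2 block of transformation_matrix(0.6 * c) is
# [[1.25, -0.75], [-0.75, 1.25]].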
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_97_92_45)
print('''Example of four vector: ''')
print(f"""ct' = {four_vector[0]}""")
print(f"""x' = {four_vector[1]}""")
print(f"""y' = {four_vector[2]}""")
print(f"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"""\n{numerical_vector}""") | 710 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"
    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 338 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images; channels-first ndarrays are converted to channels-last PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
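
# Hedged usage sketch (illustrative, not part of the original test module; the
# save path is a placeholder): the round trip the tests above exercise looks
# like this outside of unittest.
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   image_processor = ViTImageProcessor()
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   processor.save_pretrained("/tmp/dual_encoder_processor")
#   processor = VisionTextDualEncoderProcessor.from_pretrained("/tmp/dual_encoder_processor")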
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place using the cocktail shaker sort algorithm
    (a bidirectional bubble sort).

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
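    # Hedged extra demo (added for illustration; the doctest above is the
    # ground truth): 2022-01-01 fell on a Saturday.
    print(get_week_day(2022, 1, 1))  # Saturday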
| 721 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Prints the entropy of the first- and second-order character
    approximations of the given text, and the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts:
    single-character frequencies and two-character (bigram) frequencies.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
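    # Illustrative extra run (added; any text made of lowercase ASCII letters
    # and spaces works with the alphabet defined in calculate_prob):
    calculate_prob("this is a short sample text used to demo the entropy code")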
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prophetnet_default_batching(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
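
# Hedged sketch (illustrative, mirrors test_wordpiece_tokenizer above):
# WordPiece greedily matches the longest vocabulary prefix and emits
# "##"-prefixed continuation pieces, falling back to the unknown token.
#
#   vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed"])}
#   tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
#   tokenizer.tokenize("unwanted")  # -> ["un", "##want", "##ed"]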
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
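
# Hedged usage note (the script filename is a placeholder): these checks only
# do something interesting when launched once per process, e.g.
#
#   accelerate launch --num_processes 2 test_ops.py
#
# so that gather/broadcast/reduce actually span multiple workers.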
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)

    >>> round(equated_monthly_installments(25000, 0.12, 3), 2)
    830.36
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
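
    # Worked example (added for illustration): a 25_000 loan at 12% annual
    # interest repaid over 3 years costs roughly 830.36 per month.
    print(f"{equated_monthly_installments(25_000, 0.12, 3):.2f}")  # ~830.36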
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model , '''summarization''')
save_git_info(self.hparams.output_dir)
UpperCAmelCase_ = Path(self.output_dir) / '''metrics.json'''
UpperCAmelCase_ = Path(self.output_dir) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path)
UpperCAmelCase_ = 0
UpperCAmelCase_ = defaultdict(_lowercase)
UpperCAmelCase_ = self.config.model_type
UpperCAmelCase_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
UpperCAmelCase_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
UpperCAmelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
UpperCAmelCase_ = get_git_info()['''repo_sha''']
UpperCAmelCase_ = hparams.num_workers
UpperCAmelCase_ = None # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
UpperCAmelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''') else LegacySeqaSeqDataset
)
UpperCAmelCase_ = False
UpperCAmelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase_ = self.hparams.eval_max_gen_length
else:
UpperCAmelCase_ = self.model.config.max_length
UpperCAmelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __a ( self :List[Any] , _lowercase :Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
UpperCAmelCase_ = {
k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_lowercase , Path(self.output_dir) / '''text_batch.json''')
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir) / '''tok_batch.json''')
UpperCAmelCase_ = True
return readable_batch
def __a ( self :Dict , _lowercase :Optional[Any] , **_lowercase :List[Any]) -> str:
return self.model(_lowercase , **_lowercase)
def __a ( self :Tuple , _lowercase :List[int]) -> str:
UpperCAmelCase_ = self.tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase)
return lmap(str.strip , _lowercase)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
@property
def __a ( self :List[Any]) -> int:
return self.tokenizer.pad_token_id
def __a ( self :Any , _lowercase :Tuple , _lowercase :Optional[int]) -> Dict:
UpperCAmelCase_ = self._step(_lowercase)
UpperCAmelCase_ = dict(zip(self.loss_names , _lowercase))
# tokens per batch
UpperCAmelCase_ = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
UpperCAmelCase_ = batch['''input_ids'''].shape[0]
UpperCAmelCase_ = batch['''input_ids'''].eq(self.pad).sum()
UpperCAmelCase_ = batch['''input_ids'''].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __a ( self :Union[str, Any] , _lowercase :int , _lowercase :List[Any]) -> Dict:
return self._generative_step(_lowercase)
def __a ( self :int , _lowercase :List[str] , _lowercase :List[Any]="val") -> Dict:
self.step_count += 1
UpperCAmelCase_ = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
UpperCAmelCase_ = losses['''loss''']
UpperCAmelCase_ = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
UpperCAmelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase_ = torch.tensor(_lowercase).type_as(_lowercase)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(_lowercase)
UpperCAmelCase_ = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
UpperCAmelCase_ = self.step_count
self.metrics[prefix].append(_lowercase) # callback writes this to self.metrics_save_path
UpperCAmelCase_ = flatten_list([x['''preds'''] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def __a ( self :int , _lowercase :Optional[int] , _lowercase :Dict) -> Dict:
return calculate_rouge(_lowercase , _lowercase)
def __a ( self :Optional[Any] , _lowercase :dict) -> dict:
UpperCAmelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
UpperCAmelCase_ = self.ids_to_clean_text(_lowercase)
UpperCAmelCase_ = self.ids_to_clean_text(batch['''labels'''])
UpperCAmelCase_ = self._step(_lowercase)
UpperCAmelCase_ = dict(zip(self.loss_names , _lowercase))
UpperCAmelCase_ = self.calc_generative_metrics(_lowercase , _lowercase)
UpperCAmelCase_ = np.mean(lmap(_lowercase , _lowercase))
base_metrics.update(gen_time=_lowercase , gen_len=_lowercase , preds=_lowercase , target=_lowercase , **_lowercase)
return base_metrics
def __a ( self :Optional[Any] , _lowercase :int , _lowercase :Optional[Any]) -> Optional[int]:
return self._generative_step(_lowercase)
def __a ( self :str , _lowercase :List[Any]) -> List[Any]:
return self.validation_epoch_end(_lowercase , prefix='''test''')
def __a ( self :Union[str, Any] , _lowercase :Optional[int]) -> SeqaSeqDataset:
UpperCAmelCase_ = self.n_obs[type_path]
UpperCAmelCase_ = self.target_lens[type_path]
UpperCAmelCase_ = self.dataset_class(
self.tokenizer , type_path=_lowercase , n_obs=_lowercase , max_target_length=_lowercase , **self.dataset_kwargs , )
return dataset
def __a ( self :str , _lowercase :str , _lowercase :int , _lowercase :bool = False) -> DataLoader:
UpperCAmelCase_ = self.get_dataset(_lowercase)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase_ = dataset.make_sortish_sampler(_lowercase , distributed=self.hparams.gpus > 1)
return DataLoader(
_lowercase , batch_size=_lowercase , collate_fn=dataset.collate_fn , shuffle=_lowercase , num_workers=self.num_workers , sampler=_lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1)
return DataLoader(
_lowercase , batch_sampler=_lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_lowercase , batch_size=_lowercase , collate_fn=dataset.collate_fn , shuffle=_lowercase , num_workers=self.num_workers , sampler=_lowercase , )
def __a ( self :int) -> DataLoader:
UpperCAmelCase_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_lowercase)
return dataloader
def __a ( self :int) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size)
def __a ( self :List[str]) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''')
parser.add_argument('''--freeze_embeds''' , action='''store_true''')
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_lowercase)
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_lowercase)
parser.add_argument('''--max_tokens_per_batch''' , type=_lowercase , default=_lowercase)
parser.add_argument('''--logger_name''' , type=_lowercase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''')
parser.add_argument('''--n_train''' , type=_lowercase , default=-1 , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_val''' , type=_lowercase , default=500 , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_test''' , type=_lowercase , default=-1 , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument(
'''--task''' , type=_lowercase , default='''summarization''' , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument('''--label_smoothing''' , type=_lowercase , default=0.0 , required=_lowercase)
parser.add_argument('''--src_lang''' , type=_lowercase , default='''''' , required=_lowercase)
parser.add_argument('''--tgt_lang''' , type=_lowercase , default='''''' , required=_lowercase)
parser.add_argument('''--eval_beams''' , type=_lowercase , default=_lowercase , required=_lowercase)
parser.add_argument(
'''--val_metric''' , type=_lowercase , default=_lowercase , required=_lowercase , choices=['''bleu''', '''rouge2''', '''loss''', None])
parser.add_argument('''--eval_max_gen_length''' , type=_lowercase , default=_lowercase , help='''never generate more than n tokens''')
parser.add_argument('''--save_top_k''' , type=_lowercase , default=1 , required=_lowercase , help='''How many checkpoints to save''')
parser.add_argument(
'''--early_stopping_patience''' , type=_lowercase , default=-1 , required=_lowercase , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = pl.Trainer.add_argparse_args(parser)
UpperCamelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCamelCase_ = parser.parse_args()
main(args)
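
# Hedged invocation sketch (added; --model_name_or_path/--data_dir/--output_dir
# come from the shared lightning_base/add_generic_args helpers and are assumed
# here, other flags match the argparse definitions above):
#
#   python finetune.py \
#     --model_name_or_path t5-small \
#     --data_dir ./cnn_dm \
#     --output_dir ./runs/t5_cnn_dm \
#     --do_predict \
#     --task summarization \
#     --n_val 500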
"""Tensorflow mT5 model."""

from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
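
# Hedged usage sketch (added; the classes above are thin aliases, so the usual
# TF 2.0 API applies and "google/mt5-small" is the canonical small checkpoint):
#
#   model = TFMT5Model.from_pretrained("google/mt5-small")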
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
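
# Hedged sampling sketch (added; `unet_apply`, `sample` and `key` are
# placeholders): the stochastic sampler from Karras et al. (2022) drives the
# methods above roughly like this.
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = unet_apply(sample_hat, sigma_hat)
#       sample, derivative, state = scheduler.step(
#           state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False
#       )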
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
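
# Hedged non-interactive sketch (added): build the adjacency matrix directly
# instead of prompting for input.
#
#   INF = float("inf")
#   g = [[0.0, 2.0, INF], [1.0, 0.0, INF], [INF, INF, 0.0]]
#   dist, _ = floyd_warshall(g, 3)  # prints the 3x3 shortest-path matrix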
"""simple docstring"""
def binary_xor(a: int, b: int) -> str:
    """
    Returns the binary XOR of two non-negative integers as a "0b"-prefixed string.

    >>> binary_xor(25, 32)
    '0b111001'
    >>> binary_xor(37, 50)
    '0b010111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from the json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
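
# Hedged invocation sketch (added; paths are placeholders, flags match the
# argparse definitions above):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#     --tapas_config_file ./tapas_wtq/bert_config.json \
#     --pytorch_dump_path ./tapas_wtq_pytorch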
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # accepted for signature parity; not stored by the tester
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-fFfF")
    -1111111111111111
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's algorithm,
    given sample points (x_points[i], y_points[i]) with distinct x values.

    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
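# Note on the return value: the first element is the interpolated value at x0;
# the second is the full Neville tableau, which is handy for inspecting the
# intermediate estimates that converge toward that value.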
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
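# Example invocation (the script name and all paths below are illustrative
# placeholders, not shipped files):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
#       --tf_checkpoint_path tapas_wtq/model.ckpt \
#       --tapas_config_file tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas_wtq_pytorch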
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
def multiply(a: int, b: int) -> int:
    """Multiply two integers using repeated doubling (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c with the same doubling scheme, keeping intermediates small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
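# Minimal self-check (inputs chosen arbitrarily): the doubling-based product
# should match the built-in operator, and the modular variant should match
# (a * b) % c computed directly.
if __name__ == "__main__":
    assert multiply(13, 7) == 13 * 7
    assert multiply_mod(13, 7, 5) == (13 * 7) % 5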
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n (Project Euler 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""


DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object of the given name for the given backend."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; optionally overwrite them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit
    number n (Project Euler 8)."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
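# For the 1000-digit constant above the expected answer is 23514624000, the
# product of the thirteen adjacent digits 5576689664895.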
if __name__ == "__main__":
print(F'''{solution() = }''')
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            # Swap the payloads rather than relinking the nodes.
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
def longest_distance(graph: dict) -> None:
    """Print the number of vertices on the longest path in the given DAG."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
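# For the DAG above the printed result should be 5: `long_dist` counts vertices
# rather than edges, and the longest chain is 0 -> 2 -> 5 -> 6 -> 7.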
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
for i in range(len(_lowercase ) - 1 , 0 , -1 ):
lowerCAmelCase = False
for j in range(_lowercase , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCAmelCase = unsorted[j - 1], unsorted[j]
lowerCAmelCase = True
for j in range(_lowercase ):
if unsorted[j] > unsorted[j + 1]:
lowerCAmelCase = unsorted[j + 1], unsorted[j]
lowerCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(",")]
print(f'{cocktail_shaker_sort(unsorted) = }')
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below limit using the sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below the ceiling that can be written as the sum of the
    most consecutive primes (Project Euler 50)."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f'{solution() = }')
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` via Pollard's rho, or None on failure."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
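# Two deterministic sanity checks, given the defaults above:
#   pollard_rho(100) returns 2 via the even-input shortcut, while
#   pollard_rho(17) returns None, since a prime has no nontrivial divisor to find.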
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 711 |
"""MaskFormer model configuration."""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
else:
# verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a backbone and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )
    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
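# A minimal usage sketch (assuming the default fallbacks above are acceptable):
#     config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )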
| 156 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A century year is a leap year only when it is divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
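# Hand-checked example: get_week_day(2020, 1, 1) gives century anchor 2,
# doomsday 6 and leap-year January anchor 4, so (6 + 1 - 4) % 7 = 3 -> "Wednesday".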
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 32 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, set the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]

    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
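    # Note that `pred` lives in the scaled [0, 1] space. To compare against the
    # raw series you would keep the fitted MinMaxScaler (the instance above is
    # discarded) and apply its inverse_transform to the predictions.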
| 392 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
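# With this pattern, importing the package stays cheap: the torch- and
# flax-specific submodules above are only imported when one of their exported
# names is first accessed through the _LazyModule proxy.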
| 406 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove path segments: positive counts shave from the front, negative from the back."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Update resnet checkpoint paths to the new diffusers naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
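# For example, "input_blocks.1.0.in_layers.0.weight" is mapped to
# "input_blocks.1.0.norm1.weight" (with the default n_shave_prefix_segments=0).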
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Update attention checkpoint paths to the new diffusers naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an LDM UNet checkpoint into the diffusers UNet2DModel layout."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 406 | 1 |