import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
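        # Worked example of the arithmetic above (not executed by the tests):
        # with the defaults image_size=30 and patch_size=2 the image splits
        # into (30 // 2) ** 2 = 225 patches, so seq_length = 226; for ViT-Base
        # (image_size=224, patch_size=16) it is 14**2 + 1 = 197.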
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
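# To run only this file (the path is illustrative; it is the usual location of
# this test in the transformers repo, and RUN_SLOW=1 enables the @slow tests):
#   RUN_SLOW=1 python -m pytest tests/models/vit/test_modeling_vit.py -rA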
# ---------------------------------------------------------------------------
# Next sample: Stable Diffusion "safe" pipeline tests (diffusers)
# ---------------------------------------------------------------------------
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        # A stub feature extractor: returns an object with empty pixel_values
        # and a chainable .to(device), which is all the pipeline needs here.
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
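    # Pattern worth noting above: a fresh torch.Generator re-seeded with the
    # same value before each pipeline call makes both runs (return_dict=True
    # vs. return_dict=False) deterministic, so their output slices can be
    # checked against a single hard-coded expected_slice.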
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance the safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# ---------------------------------------------------------------------------
# Next sample: text-generation stopping criteria (transformers)
# ---------------------------------------------------------------------------
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class __A (snake_case__):
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : Optional[Any] ) ->bool:
"""simple docstring"""
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class __A (snake_case__):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = None ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = max_length
snake_case_ = max_position_embeddings
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : str , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : int ) ->bool:
"""simple docstring"""
snake_case_ = input_ids.shape[-1]
snake_case_ = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class __A (snake_case__):
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) ->Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"""with `max_length = start_length + max_new_tokens` instead.""" , UpperCAmelCase_ , )
snake_case_ = start_length
snake_case_ = max_new_tokens
snake_case_ = start_length + max_new_tokens
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Tuple , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : List[str] ) ->bool:
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class __A (snake_case__):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[float] = None ) ->List[Any]:
"""simple docstring"""
snake_case_ = max_time
snake_case_ = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Tuple , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : Union[str, Any] ) ->bool:
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class __A (snake_case__):
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Optional[int] , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : str ) ->bool:
"""simple docstring"""
return any(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) for criteria in self )
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
for stopping_criterium in self:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return stopping_criterium.max_length
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return stopping_criterium.max_length
return None
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> StoppingCriteriaList:
snake_case_ = stopping_criteria.max_length
snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , _SCREAMING_SNAKE_CASE )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_SCREAMING_SNAKE_CASE ) )
return new_stopping_criteria
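# A minimal usage sketch of the classes above (illustration only; in user code
# these are imported from `transformers`):
#
#     criteria = StoppingCriteriaList(
#         [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#     )
#     input_ids = torch.zeros((1, 25), dtype=torch.long)
#     scores = torch.zeros((1, 50257))
#     criteria(input_ids, scores)  # True: 25 >= 20, so generation should stop
#     criteria.max_length          # 20, read back from the MaxLengthCriteria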
# ---------------------------------------------------------------------------
# Next sample: recursive bubble sort
# ---------------------------------------------------------------------------
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> list:
snake_case_ = length or len(_SCREAMING_SNAKE_CASE )
snake_case_ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
snake_case_ , snake_case_ = list_data[i + 1], list_data[i]
snake_case_ = True
return list_data if not swapped else bubble_sort(_SCREAMING_SNAKE_CASE , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
# ---------------------------------------------------------------------------
# Next sample: ImageGPT image processor (transformers)
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
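# The loop-free distance above uses the identity
# ||a_i - b_j||^2 = ||a_i||^2 - 2 * a_i . b_j + ||b_j||^2,
# so for a of shape (n, 3) and b of shape (k, 3) it returns an (n, k) matrix
# of squared distances without materializing an (n, k, 3) difference tensor.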
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor: resizes images to a smaller resolution, normalizes them to
    [-1, 1], then color-quantizes them to sequences of "pixel values" (color cluster indices).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None):
        # rescale to [0, 2] and shift down to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]

            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# ---------------------------------------------------------------------------
# Next sample: MT5 integration test (transformers)
# ---------------------------------------------------------------------------
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
# ---------------------------------------------------------------------------
# Next sample: breadth-first-search shortest path
# ---------------------------------------------------------------------------
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Traverse the graph breadth-first from the source vertex, recording
        each vertex's parent in the resulting BFS tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source vertex to `target_vertex` as a
        '->'-joined string; raise ValueError if no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))    # G->C->A->B->D
    print(g.shortest_path("G"))    # G
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
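# Note on the queue above: list.pop(0) is O(n) per pop. For larger graphs a
# collections.deque gives O(1) pops from the left with an otherwise identical
# loop:
#
#     from collections import deque
#     queue = deque([self.source_vertex])
#     while queue:
#         vertex = queue.popleft()
#         ...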
# ---------------------------------------------------------------------------
# Next sample: LED tokenizer (transformers)
# ---------------------------------------------------------------------------
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control characters
    that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
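# For example, bytes_to_unicode()[ord(" ")] == "Ġ": the space byte (32) is not
# in the printable ranges above, so it is remapped to chr(256 + 32) = "Ġ" -
# which is why GPT-2-style vocabularies show "Ġ" where a leading space was.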
def get_pairs(word):
    """
    Return the set of symbol pairs in a word. The word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
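# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}: the adjacent symbol pairs
# that bpe() below repeatedly ranks and merges.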
class LEDTokenizer(PreTrainedTokenizer):
    """
    Constructs a LED tokenizer, derived from the GPT-2 byte-level BPE tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences by adding special tokens:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
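# A minimal usage sketch (illustration only; it downloads the checkpoint named
# in PRETRAINED_VOCAB_FILES_MAP above, and the ids shown assume the standard
# led-base vocabulary):
#
#     from transformers import LEDTokenizer
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     tok("Hello world").input_ids  # [0, 31414, 232, 2] -> <s> Hello Ġworld </s>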
# ---------------------------------------------------------------------------
# Next sample: OpenAI GPT fine-tuning script on ROCStories (transformers example)
# ---------------------------------------------------------------------------
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
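# Example: accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) == 2,
# since argmax picks class 1 for the first row and class 0 for the second,
# matching both labels.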
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))

    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
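# Shape sketch for the tensors built above, per split, with N examples and the
# computed input_len L:
#   input_ids    (N, 2, L)  both candidate sequences per story, zero-padded
#   mc_token_ids (N, 2)     index of the _classify_ token in each sequence
#   lm_labels    (N, 2, L)  the same tokens, with -100 over the padding so the
#                           LM loss ignores it
#   mc_labels    (N,)       which of the two continuations is correct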
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' ,type=_UpperCamelCase ,default='''openai-gpt''' ,help='''pretrained model name''' )
parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
parser.add_argument('''--train_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--eval_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 )
parser.add_argument('''--num_train_epochs''' ,type=_UpperCamelCase ,default=3 )
parser.add_argument('''--train_batch_size''' ,type=_UpperCamelCase ,default=8 )
parser.add_argument('''--eval_batch_size''' ,type=_UpperCamelCase ,default=16 )
parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=_UpperCamelCase ,help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' ,type=_UpperCamelCase ,default=1 )
parser.add_argument(
'''--max_steps''' ,default=-1 ,type=_UpperCamelCase ,help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) ,)
parser.add_argument(
'''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,)
parser.add_argument('''--learning_rate''' ,type=_UpperCamelCase ,default=6.25e-5 )
parser.add_argument('''--warmup_steps''' ,default=0 ,type=_UpperCamelCase ,help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' ,type=_UpperCamelCase ,default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' ,type=_UpperCamelCase ,default=0.01 )
parser.add_argument('''--lm_coef''' ,type=_UpperCamelCase ,default=0.9 )
parser.add_argument('''--n_valid''' ,type=_UpperCamelCase ,default=3_74 )
parser.add_argument('''--server_ip''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' )
__lowerCamelCase = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowerCamelCase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase ,_UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowerCamelCase = ['''_start_''', '''_delimiter_''', '''_classify_''']
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCamelCase )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCamelCase ) )
model.to(_UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCamelCase : Tuple ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) )
elif isinstance(_UpperCamelCase ,_UpperCamelCase ):
return obj
return [tokenize_and_encode(_UpperCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__lowerCamelCase = load_rocstories_dataset(args.train_dataset )
__lowerCamelCase = load_rocstories_dataset(args.eval_dataset )
__lowerCamelCase = (train_dataset, eval_dataset)
__lowerCamelCase = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
__lowerCamelCase = model.config.n_positions // 2 - 2
__lowerCamelCase = max(
    len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
    for dataset in encoded_datasets
    for story, cont1, cont2, _ in dataset )
__lowerCamelCase = min(_UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowerCamelCase = pre_process_datasets(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,*_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = tensor_datasets[0], tensor_datasets[1]
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = RandomSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.train_batch_size )
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = SequentialSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowerCamelCase = args.max_steps
__lowerCamelCase = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
__lowerCamelCase = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowerCamelCase = list(model.named_parameters() )
__lowerCamelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__lowerCamelCase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__lowerCamelCase = AdamW(_UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon )
__lowerCamelCase = get_linear_schedule_with_warmup(
_UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=_UpperCamelCase )
if args.do_train:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ):
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = tqdm(_UpperCamelCase ,desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
__lowerCamelCase = model(_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowerCamelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__lowerCamelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase ,scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowerCamelCase = model.module if hasattr(_UpperCamelCase ,'''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
torch.save(model_to_save.state_dict() ,_UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
__lowerCamelCase ,__lowerCamelCase = 0, 0
__lowerCamelCase ,__lowerCamelCase = 0, 0
for batch in tqdm(_UpperCamelCase ,desc='''Evaluating''' ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
with torch.no_grad():
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model(
_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = mc_logits.detach().cpu().numpy()
__lowerCamelCase = mc_labels.to('''cpu''' ).numpy()
__lowerCamelCase = accuracy(_UpperCamelCase ,_UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowerCamelCase = eval_loss / nb_eval_steps
__lowerCamelCase = eval_accuracy / nb_eval_examples
__lowerCamelCase = tr_loss / nb_tr_steps if args.do_train else None
__lowerCamelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__lowerCamelCase = os.path.join(args.output_dir ,'''eval_results.txt''' )
with open(_UpperCamelCase ,'''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' ,_UpperCamelCase ,str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
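# The training loop above smooths the reported loss with an exponential moving
# average (0.7 * previous + 0.3 * current). A minimal, self-contained sketch of
# that bookkeeping; the helper name `ema_update` is ours, not the script's:
def ema_update(prev_avg, new_value, decay=0.7):
    # Seed with the first observation, then blend each new one in.
    return new_value if prev_avg is None else decay * prev_avg + (1 - decay) * new_value

exp_average_loss = None
for loss_value in (2.5, 2.1, 1.8):
    exp_average_loss = ema_update(exp_average_loss, loss_value)
print(exp_average_loss)  # ~2.206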
| 330 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def a__ ( _UpperCamelCase : Optional[int] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = module
__lowerCamelCase = nn.Sequential(
nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , )
__lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
lowerCAmelCase__ = """bigscience/bloom-1b7"""
# Constant values
lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74
lowerCAmelCase__ = """Hello my name is"""
lowerCAmelCase__ = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
# Models and tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def lowerCamelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_abit.config
self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) )
__lowerCamelCase = config.to_dict()
__lowerCamelCase = config.to_diff_dict()
__lowerCamelCase = config.to_json_string()
def lowerCamelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__lowerCamelCase = self.model_fpaa.get_memory_footprint()
__lowerCamelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowerCamelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowerCamelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BitsAndBytesConfig()
__lowerCamelCase = True
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BitsAndBytesConfig()
with self.assertRaises(__UpperCAmelCase ):
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = self.model_fpaa.to(torch.floataa )
__lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.half()
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.float()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls ):
'''simple docstring'''
__lowerCamelCase = '''t5-small'''
__lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
__lowerCamelCase = '''Translate in German: Hello, my dog is cute'''
def lowerCamelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowerCamelCase = None
# test with `t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
__lowerCamelCase = modules
def lowerCamelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__lowerCamelCase = '''bigscience/bloom-560m'''
__lowerCamelCase = '''t5-small'''
# Different types of model
__lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Sequence classification model
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# CausalLM model
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Seq2seq model
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def lowerCamelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowerCamelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
__lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowerCamelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowerCamelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__UpperCAmelCase ) ):
__lowerCamelCase = LoRALayer(module.q_proj , rank=16 )
__lowerCamelCase = LoRALayer(module.k_proj , rank=16 )
__lowerCamelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowerCamelCase = model.forward(**__UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """gpt2-xl"""
lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
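# A hedged sketch of the loading pattern these tests exercise. The obfuscated
# "load_in_abit" stands in for transformers' real `load_in_4bit`/`load_in_8bit`
# arguments; the model id and compute dtype below are illustrative, not prescribed:
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model_4bit = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
)
print(model_4bit.get_memory_footprint())  # far smaller than the fp16 footprint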
| 330 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class UpperCamelCase_ ( a_ ):
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(snake_case__ )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self._create_example_records()
UpperCAmelCase = Dataset.from_list(snake_case__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(snake_case__ ):
self.assertDictEqual(snake_case__ , example_records[i] )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self._create_example_records()
UpperCAmelCase = Dataset.from_list(snake_case__ )
UpperCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCamelCase_ ( self ) -> int: # checks what happens with missing columns
"""simple docstring"""
UpperCAmelCase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
UpperCAmelCase = Dataset.from_list(snake_case__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCamelCase_ ( self ) -> List[str]: # checks if the type can be inferred from the second record
"""simple docstring"""
UpperCAmelCase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
UpperCAmelCase = Dataset.from_list(snake_case__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = Dataset.from_list([] )
self.assertEqual(len(snake_case__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
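# A standalone run of the behavior asserted above: the first record fixes the
# schema, and keys missing from later records are filled with None.
from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset.column_names)  # ['col_1']
print(dset[1])            # {'col_1': None}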
| 248 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
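# The same subcommand-dispatch idiom, reduced to the standard library only
# (all names below are made up for the demo):
from argparse import ArgumentParser

demo = ArgumentParser("demo", usage="demo <command> [<args>]", allow_abbrev=False)
commands = demo.add_subparsers(help="demo command helpers")
run = commands.add_parser("run")
run.set_defaults(func=lambda a: print("running"))
parsed = demo.parse_args(["run"])
parsed.func(parsed)  # prints "running"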
| 248 | 1 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-character bit string from big- to little-endian, 8 chars at a time."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert the message to a padded bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT over 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 hex characters (bytes)."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67_452_301
    b0 = 0xEF_CDA_B89
    c0 = 0x98_BAD_CFE
    d0 = 0x10_325_476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
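# Cross-check against the standard library (assuming the reconstruction above
# matches reference MD5; `md5_me` returns the hex digest as bytes):
import hashlib

msg = b"The quick brown fox jumps over the lazy dog"
assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")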
| 95 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex uses ``color``."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Try to color vertex ``index`` and, recursively, every vertex after it."""
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return one valid coloring using at most ``max_colors`` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
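# Example: a 5-vertex adjacency matrix that admits a 3-coloring. One possible
# result of the backtracking search above is shown; exact labels depend on
# search order.
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 0, 1, 0]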
| 343 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 362 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count the ways to write ``needed_sum`` as a sum of unique natural-number powers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Validate the inputs and return the number of solutions."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10.")
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
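# Worked example: 4 + 9 = 2**2 + 3**2 is the only way to write 13 as a sum of
# unique natural-number squares, so:
print(solve(13, 2))  # 1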
| 273 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _A ( UpperCamelCase_ : Any) -> List[Any]:
'''simple docstring'''
__lowercase = filter(lambda UpperCamelCase_: p.requires_grad, model.parameters())
__lowercase = sum([np.prod(p.size()) for p in model_parameters])
return params
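# Quick sanity check of the count above on a toy module: a Linear(10, 2) has
# 10 * 2 weights + 2 biases = 22 trainable parameters.
import torch.nn as nn

toy = nn.Linear(10, 2)
print(sum(np.prod(p.size()) for p in toy.parameters() if p.requires_grad))  # 22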
_a = logging.getLogger(__name__)
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> Optional[int]:
'''simple docstring'''
if metric == "rouge2":
__lowercase = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
__lowercase = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
__lowercase = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function.")
__lowercase = ModelCheckpoint(
dirpath=UpperCamelCase_, filename=UpperCamelCase_, monitor=F"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : List[str]) -> int:
'''simple docstring'''
return EarlyStopping(
monitor=F"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=UpperCamelCase_, verbose=UpperCamelCase_, )
class _lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def _lowercase ( self : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : str ):
__lowercase = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCAmelCase__ )
@rank_zero_only
def _lowercase ( self : List[Any], UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : pl.LightningModule, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int]=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowercase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
__lowercase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowercase = od / "test_results.txt"
__lowercase = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowercase = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowercase = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=UpperCAmelCase__ )
generations_file.parent.mkdir(exist_ok=UpperCAmelCase__ )
with open(UpperCAmelCase__, "a+" ) as writer:
for key in sorted(UpperCAmelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowercase = metrics[key]
if isinstance(UpperCAmelCase__, torch.Tensor ):
__lowercase = val.item()
__lowercase = F"""{key}: {val:.6f}\n"""
writer.write(UpperCAmelCase__ )
if not save_generations:
return
if "preds" in metrics:
__lowercase = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(UpperCAmelCase__ )
@rank_zero_only
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
try:
__lowercase = pl_module.model.model.num_parameters()
except AttributeError:
__lowercase = pl_module.model.num_parameters()
__lowercase = count_trainable_parameters(UpperCAmelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def _lowercase ( self : Any, UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : pl.LightningModule ):
save_json(pl_module.metrics, pl_module.metrics_save_path )
return self._write_logs(UpperCAmelCase__, UpperCAmelCase__, "test" )
@rank_zero_only
def _lowercase ( self : int, UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : Optional[Any] ):
save_json(pl_module.metrics, pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 17 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : int = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ['DPTFeatureExtractor']
A : int = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
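# The generic idea behind `_LazyModule`, sketched with PEP 562's module-level
# __getattr__ — an illustration of the pattern, not transformers' actual class:
import importlib

_lazy_structure = {"configuration_dpt": ["DPTConfig"]}

def __getattr__(name):
    for module, names in _lazy_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")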
| 305 | 0 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    """Interpolate the polynomial through the given points and evaluate it at
    ``xa`` with Neville's method; returns [value, table of partial results]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
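# Points on the line y = x + 5; the interpolant reproduces the line, so the
# value at x = 5 is 10.0:
print(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0])  # 10.0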
| 322 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing-degree order) at ``x``."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one multiply per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 322 | 1 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowerCamelCase_ : Union[str, Any] = 'scheduler_config.json'
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : Optional[Any] = 1
lowercase_ : str = 2
lowercase_ : List[str] = 3
lowercase_ : List[Any] = 4
lowercase_ : int = 5
@dataclass
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : jnp.ndarray
class _UpperCAmelCase :
'''simple docstring'''
lowercase_ : int = SCHEDULER_CONFIG_NAME
lowercase_ : str = ["""dtype"""]
lowercase_ : List[Any] = []
lowercase_ : Any = True
@classmethod
def lowerCamelCase_ ( cls , snake_case_ = None , snake_case_ = None , snake_case_=False , **snake_case_ , ):
"""simple docstring"""
A_ , A_ : str = cls.load_config(
pretrained_model_name_or_path=snake_case_ , subfolder=snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ , )
A_ , A_ : int = cls.from_config(snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ )
if hasattr(snake_case_ , 'create_state' ) and getattr(snake_case_ , 'has_state' , snake_case_ ):
A_ : Optional[int] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = False , **snake_case_ ):
"""simple docstring"""
self.save_config(save_directory=snake_case_ , push_to_hub=snake_case_ , **snake_case_ )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
A_ : int = list(set([cls.__name__] + cls._compatibles ) )
A_ : Dict = importlib.import_module(__name__.split('.' )[0] )
A_ : Tuple = [
getattr(snake_case_ , snake_case_ ) for c in compatible_classes_str if hasattr(snake_case_ , snake_case_ )
]
return compatible_classes
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
assert len(_UpperCAmelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCAmelCase ) - x.ndim) ) , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=0.999 , _UpperCAmelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCAmelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
A_ : List[Any] = []
for i in range(_UpperCAmelCase ):
A_ : Optional[Any] = i / num_diffusion_timesteps
A_ : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCAmelCase ) / alpha_bar(_UpperCAmelCase ) , _UpperCAmelCase ) )
return jnp.array(_UpperCAmelCase , dtype=_UpperCAmelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
@classmethod
def lowerCamelCase_ ( cls , snake_case_ ):
"""simple docstring"""
A_ : Tuple = scheduler.config
if config.trained_betas is not None:
A_ : str = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
A_ : str = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A_ : int = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A_ : Optional[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
A_ : List[Any] = 1.0 - betas
A_ : Optional[int] = jnp.cumprod(snake_case_ , axis=0 )
return cls(
alphas=snake_case_ , betas=snake_case_ , alphas_cumprod=snake_case_ , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = state.alphas_cumprod
A_ : Tuple = alphas_cumprod[timesteps] ** 0.5
A_ : str = sqrt_alpha_prod.flatten()
A_ : str = broadcast_to_shape_from_left(_UpperCAmelCase , original_samples.shape )
A_ : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ : List[str] = sqrt_one_minus_alpha_prod.flatten()
A_ : List[Any] = broadcast_to_shape_from_left(_UpperCAmelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ , A_ : List[Any] = get_sqrt_alpha_prod(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
A_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ , A_ : int = get_sqrt_alpha_prod(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
A_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
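# A self-contained NumPy rendering of the "squaredcos_cap_v2" cosine schedule
# implemented above, for readers without JAX (the helper name is ours):
import math
import numpy as np

def cosine_betas(num_steps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return np.array(
        [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
         for i in range(num_steps)]
    )

print(cosine_betas(4))  # monotonically increasing betas in (0, 1)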
| 286 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
A_ : Optional[Any] = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ : List[str] = ''
else:
A_ : Dict = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
A_ : List[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A_ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A_ : Tuple = in_proj_bias[: config.hidden_size]
A_ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Tuple = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Any = dct.pop(_UpperCAmelCase )
A_ : Optional[int] = val
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A_ : int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
A_ : List[Any] = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=_UpperCAmelCase , )
A_ : Optional[int] = ViTHybridConfig(backbone_config=_UpperCAmelCase , image_size=384 , num_labels=1000 )
A_ : Union[str, Any] = False
# load original model from timm
A_ : List[Any] = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Tuple = timm_model.state_dict()
if base_model:
remove_classification_head_(_UpperCAmelCase )
A_ : Any = create_rename_keys(_UpperCAmelCase , _UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
A_ : Union[str, Any] = 'huggingface/label-files'
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : List[str] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
A_ : str = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : List[Any] = ViTHybridModel(_UpperCAmelCase ).eval()
else:
A_ : str = ViTHybridForImageClassification(_UpperCAmelCase ).eval()
model.load_state_dict(_UpperCAmelCase )
# create image processor
A_ : Dict = create_transform(**resolve_data_config({} , model=_UpperCAmelCase ) )
A_ : List[str] = transform.transforms
A_ : List[str] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
A_ : Tuple = ViTHybridImageProcessor(
do_resize=_UpperCAmelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_UpperCAmelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=_UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A_ : Optional[Any] = prepare_img()
A_ : Any = transform(_UpperCAmelCase ).unsqueeze(0 )
A_ : Dict = processor(_UpperCAmelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
# verify logits
with torch.no_grad():
A_ : List[Any] = model(_UpperCAmelCase )
A_ : List[str] = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
A_ : Union[str, Any] = timm_model.forward_features(_UpperCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_UpperCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
A_ : Tuple = timm_model(_UpperCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCAmelCase , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
lowerCamelCase_ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
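# The fused-QKV split performed in `read_in_q_k_v` above, in isolation with toy
# shapes (hidden size 4):
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)  # rows stacked as [q; k; v]
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)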
| 286 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , ) -> int:
super().__init__()
lowercase__ : Tuple = nn.Embedding(a_ , a_ )
lowercase__ : int = nn.Embedding(a_ , a_ )
lowercase__ : str = False
lowercase__ : str = nn.Dropout(p=a_ )
lowercase__ : List[Any] = TaConfig(
vocab_size=a_ , d_model=a_ , num_heads=a_ , d_kv=a_ , d_ff=a_ , dropout_rate=a_ , feed_forward_proj=a_ , is_decoder=a_ , is_encoder_decoder=a_ , )
lowercase__ : str = nn.ModuleList()
for lyr_num in range(a_ ):
lowercase__ : Optional[Any] = TaBlock(a_ )
self.encoders.append(a_ )
lowercase__ : Optional[int] = TaLayerNorm(a_ )
lowercase__ : Tuple = nn.Dropout(p=a_ )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
lowercase__ : Optional[int] = self.token_embedder(a_ )
lowercase__ : List[str] = encoder_input_tokens.shape[1]
lowercase__ : str = torch.arange(a_ , device=encoder_input_tokens.device )
x += self.position_encoding(a_ )
lowercase__ : List[Any] = self.dropout_pre(a_ )
# inverted the attention mask
lowercase__ : Optional[Any] = encoder_input_tokens.size()
lowercase__ : List[Any] = self.get_extended_attention_mask(a_ , a_ )
for lyr in self.encoders:
lowercase__ : List[Any] = lyr(a_ , a_ )[0]
lowercase__ : Tuple = self.layer_norm(a_ )
return self.dropout_post(a_ ), encoder_inputs_mask
| 359 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Pure merge sort; the inner ``merge`` lazily interleaves two sorted lists."""
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
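# Non-interactive check of the sort above:
assert merge_sort([5, 3, 8, 1]) == [1, 3, 5, 8]
assert merge_sort([]) == []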
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 214 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 289 |
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
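# The drop-in replacement suggested by the warning above:
#   from accelerate import find_executable_batch_size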
| 289 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 362 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
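# For reference, the generate test above mirrors this CLI form (paths are
# illustrative):
#   python run_eval.py patrickvonplaten/t5-tiny-random utest_input.source \
#       utest_output.txt --score_path scores.json --task translation_en_to_de \
#       --num_beams 2 --length_penalty 2.0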
| 130 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCamelCase ( _A = "isbn/0140328726" ):
lowerCAmelCase_ = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
lowerCAmelCase_ = f"{olid} is not a valid Open Library olid"
raise ValueError(_A )
return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
lowerCAmelCase_ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCAmelCase_ = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
lowerCAmelCase_ = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_A , _A ):
lowerCAmelCase_ = ''', '''.join(_A )
return data
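# Illustrative round trip (requires network access; output depends on the live
# Open Library API -- ISBN 0140328726 is Roald Dahl's "Matilda"):
#   summarize_book(get_openlibrary_data("isbn/0140328726"))["Title"]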
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(f"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"Sorry, there are no results for ISBN: {isbn}.")
| 278 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
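# Illustrative checks; functools.lru_cache memoizes results across calls, so
# repeated factorials are O(1) after the first computation:
assert factorial(5) == 120
assert factorial(0) == 1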
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )

        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 166 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    # a_i is stored least-significant digit first: a_i -> b * 10^k + c
    # ds_b -> digitsum(b); c -> the low k digits as a number
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n=10**15):
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
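# For orientation: the sequence is 1, 2, 4, 8, 16, 23, 28, 38, 49, ...
# (each term adds its own digit sum); e.g. solution(6) should give 23.
# Small-n values are stated here for illustration only.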
if __name__ == "__main__":
print(f'''{solution() = }''')
| 166 | 1 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_UpperCAmelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_UpperCAmelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_UpperCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn",
    ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 173 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."

    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 335 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=False, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Name of the output file for the evaluation results."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
| 335 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run iterative self-training: fine-tune, pseudo-label, then repeat."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 278 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph where each possible edge appears with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
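    # Illustrative usage: seeding the RNG makes the generator reproducible.
    random.seed(1)
    print(random_graph(4, 0.5))  # e.g. {0: [1], 1: [0, 2], 2: [1], 3: []}
    print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}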
| 224 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
'''simple docstring'''
snake_case_ = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Tuple ,A : int ,A : int ,A : Optional[int] = None ,A : int = 5_02_57 ,A : int = 10_24 ,A : int = 7_68 ,A : int = 12 ,A : int = 12 ,A : Optional[int] = None ,A : str = "gelu_new" ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 1E-5 ,A : float = 0.02 ,A : bool = True ,A : bool = True ,A : bool = False ,A : bool = False ,):
super().__init__()
__A = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
__A = prefix_inner_dim
__A = prefix_hidden_dim
__A = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__A = (
nn.Linear(self.prefix_hidden_dim ,A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__A = GPTaConfig(
vocab_size=A ,n_positions=A ,n_embd=A ,n_layer=A ,n_head=A ,n_inner=A ,activation_function=A ,resid_pdrop=A ,embd_pdrop=A ,attn_pdrop=A ,layer_norm_epsilon=A ,initializer_range=A ,scale_attn_weights=A ,use_cache=A ,scale_attn_by_inverse_layer_idx=A ,reorder_and_upcast_attn=A ,)
__A = GPTaLMHeadModel(A )
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
def UpperCamelCase_ ( self : str ,A : List[str] ,A : List[Any] ,A : List[Any] ):
__A = torch.split(A ,1 ,dim=0 )
__A = []
__A = []
for feature in features:
__A = self.decode_prefix(feature.to(A ) ) # back to the clip feature
# Only support beam search for now
__A , __A = self.generate_beam(
input_embeds=A ,device=A ,eos_token_id=A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__A = torch.stack(A )
__A = torch.stack(A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCamelCase_ ( self : Tuple ,A : List[str]=None ,A : Dict=None ,A : Optional[int]=None ,A : int = 5 ,A : int = 67 ,A : float = 1.0 ,A : Optional[int] = None ,):
__A = eos_token_id
__A = None
__A = None
__A = torch.ones(A ,device=A ,dtype=torch.int )
__A = torch.zeros(A ,device=A ,dtype=torch.bool )
if input_embeds is not None:
__A = input_embeds
else:
__A = self.transformer.transformer.wte(A )
for i in range(A ):
__A = self.transformer(inputs_embeds=A )
__A = outputs.logits
__A = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__A = logits.softmax(-1 ).log()
if scores is None:
__A , __A = logits.topk(A ,-1 )
__A = generated.expand(A ,*generated.shape[1:] )
__A , __A = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
__A = next_tokens
else:
__A = tokens.expand(A ,*tokens.shape[1:] )
__A = torch.cat((tokens, next_tokens) ,dim=1 )
else:
__A = -float(np.inf )
__A = 0
__A = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__A = scores_sum / seq_lengths[:, None]
__A , __A = scores_sum_average.view(-1 ).topk(A ,-1 )
__A = next_tokens // scores_sum.shape[1]
__A = seq_lengths[next_tokens_source]
__A = next_tokens % scores_sum.shape[1]
__A = next_tokens.unsqueeze(1 )
__A = tokens[next_tokens_source]
__A = torch.cat((tokens, next_tokens) ,dim=1 )
__A = generated[next_tokens_source]
__A = scores_sum_average * seq_lengths
__A = is_stopped[next_tokens_source]
__A = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
__A = torch.cat((generated, next_token_embed) ,dim=1 )
__A = is_stopped + next_tokens.eq(A ).squeeze()
if is_stopped.all():
break
__A = scores / seq_lengths
__A = scores.argsort(descending=A )
# tokens tensors are already padded to max_seq_length
__A = [tokens[i] for i in order]
__A = torch.stack(A ,dim=0 )
__A = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
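# The ranking rule used at the end of the beam search above, in isolation:
# beams are ordered by average per-token log-probability, so longer sequences
# are not penalised simply for being long. Illustrative tensors only:
#
#   import torch
#   total_log_prob = torch.tensor([-3.0, -2.4])  # summed over generated tokens
#   seq_lengths = torch.tensor([5.0, 3.0])       # tokens generated per beam
#   scores = total_log_prob / seq_lengths        # -> [-0.60, -0.80]
#   order = scores.argsort(descending=True)      # beam 0 wins despite lower sum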
| 124 |
import copy
import re
class TrialShortNamer:
'''simple docstring'''
snake_case_ = "hp"
snake_case_ = {}
snake_case_ = None
@classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
@staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
@staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
@staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
@classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
@classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)
@classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
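# Usage sketch (the subclass and values are hypothetical): a subclass declares
# a prefix plus defaults, after which short run names round-trip.
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#   # -> e.g. "run_lr0.0001"; parameters equal to their default are omitted
#   params = RunNamer.parse_repr(name)  # recovers {"learning_rate": 0.0001, ...}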
| 124 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
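# Usage sketch: like any PretrainedConfig subclass, the config accepts
# overrides at construction time and serialises back to a plain dict
# (the override values below are illustrative):
#
#   config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)
#   assert config.model_type == "ernie_m"
#   assert config.to_dict()["hidden_size"] == 384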
| 336 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative escape distance (step / max_step) of the complex
    number x + yi; points in the Mandelbrot set never diverge and return 1."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set, hue determined by the escape distance outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
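    # Sanity checks (illustrative): the origin never diverges, so its relative
    # escape distance is exactly 1; a far-away point escapes on the first step.
    assert get_distance(0.0, 0.0, 50) == 1.0
    assert get_distance(2.0, 2.0, 50) < 0.1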
| 122 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 152 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 152 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or an already-parsed Version) against a
    requirement, using an operator key from STR_OPERATION_TO_FUNC."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against a reference version."""
    return compare_versions(torch_version, operation, version)
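# Usage sketch: the first argument may be an installed package name or an
# already-parsed Version, and the operator must be a key of
# STR_OPERATION_TO_FUNC (e.g. ">=", "==", "<"). Illustrative calls:
#
#   if is_torch_version(">=", "1.12.0"):
#       pass  # safe to rely on features introduced in torch 1.12
#   new_enough = compare_versions("numpy", ">=", "1.20.0")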
| 79 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_A = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A = CLIPTextModel(__UpperCAmelCase )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_A = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith("mps" ):
_A = torch.manual_seed(__UpperCAmelCase )
else:
_A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_A = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = CycleDiffusionPipeline(**__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = pipe(**__UpperCAmelCase )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.get_dummy_components()
for name, module in components.items():
if hasattr(__UpperCAmelCase , "half" ):
_A = module.half()
_A = CycleDiffusionPipeline(**__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = pipe(**__UpperCAmelCase )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
_A = init_image.resize((512, 512) )
_A = "CompVis/stable-diffusion-v1-4"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(
            __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.float16 , revision="fp16" )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A black colored car"
_A = "A blue colored car"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
_A = init_image.resize((512, 512) )
_A = "CompVis/stable-diffusion-v1-4"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A black colored car"
_A = "A blue colored car"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 79 | 1 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
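# This pin table is typically consumed at setup time; a minimal sketch of such
# a consumer (the `deps_list` helper below is illustrative, not part of this
# module):
def deps_list(*pkgs):
    """Map short package names to their pinned requirement strings."""
    return [deps[pkg] for pkg in pkgs]
# e.g. install_requires = deps_list("numpy", "regex", "requests", "Pillow")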
| 51 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'''.split()
)
def lowercase_ ( _lowercase , _lowercase = 1 , _lowercase = "new" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : Dict = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__UpperCamelCase ) - valid_terms ) ):
lowerCamelCase_ : List[str] = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__UpperCamelCase )
lowerCamelCase_ : Optional[Any] = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
if response.status_code == 429:
raise requests.HTTPError
lowerCamelCase_ : Dict = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__UpperCamelCase )}
lowerCamelCase_ : List[str] = {}
for id_ in range(__UpperCamelCase ):
lowerCamelCase_ : Optional[int] = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
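    # A 429 response raises requests.HTTPError; a simple retry-with-backoff
    # wrapper for that case (hypothetical helper, not part of this module):
    #
    #   import time
    #   def get_with_retry(subreddit, tries=3, **kwargs):
    #       for attempt in range(tries):
    #           try:
    #               return get_subreddit_data(subreddit, **kwargs)
    #           except requests.HTTPError:
    #               time.sleep(2**attempt)  # back off: 1s, 2s, 4s
    #       raise RuntimeError("still rate limited")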
| 318 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ):
'''simple docstring'''
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, Image.Image )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
# Initialize image_processor
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, np.ndarray )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
# Initialize image_processor
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, torch.Tensor )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
| 266 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
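# Behavioural note on the lazy pattern above: outside of type checking, the
# module object is swapped for a _LazyModule, so submodules are imported only
# when one of the names listed in _import_structure is first accessed.
# Illustrative:
#
#   from transformers.models.speech_to_text import Speech2TextConfig  # imports
#   # the configuration submodule here, not at `import transformers` time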
| 334 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
A: Any = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
A: Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
A: Dict = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
A: int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
A: int = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
A: List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
A: Dict = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
A: Any = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) for seq in encoding['''input_ids''']]
# fmt: off
A: Any = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
A: Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE_ )
for expected, decoded in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
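# Walk-through of the toy byte-level BPE above (illustrative): "\u0120" marks
# a leading space, and only the merges listed in the merges file may fire, so
# "lower newer" tokenizes exactly as asserted in test_full_tokenizer:
#
#   "lower"  -> "l" "o" "w" "er"           # only the "e r" merge applies
#   " newer" -> "\u0120" "n" "e" "w" "er"  # no merge joins "\u0120" with "n"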
| 334 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case ( self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # `get_values`, `MODEL_FOR_PRETRAINING_MAPPING` and `torch_device` come from the imports
        # at the top of this test module.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 57 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    """Decorator that warns the caller that `fn` is experimental and may change."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
| 85 | 0 |
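A quick usage sketch for the decorator above (under the cleaned-up name `experimental`); the caught category is `UserWarning`, matching the fixed implementation:

import warnings

@experimental
def resize(width, height):
    return width * height

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    resize(2, 3)                                    # emits the experimental warning
    assert "experimental" in str(caught[0].message)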
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 314 |
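The decode step above is mostly string surgery. A standalone sketch of that post-processing, with illustrative placeholder tokens (real values come from the Donut tokenizer):

import re

def clean_donut_sequence(sequence: str, eos_token: str = "</s>", pad_token: str = "<pad>") -> str:
    # Strip special tokens, then drop only the first task-start token (count=1).
    sequence = sequence.replace(eos_token, "").replace(pad_token, "")
    return re.sub(r"<.*?>", "", sequence, count=1).strip()

raw = "<s_docvqa><s_question> total? </s_question><s_answer> 42 </s_answer></s>"
print(clean_donut_sequence(raw))
# "<s_question> total? </s_question><s_answer> 42 </s_answer>"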
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 314 | 1 |
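A hypothetical usage sketch for the pipeline above. It assumes the `diffusers` package and a public unconditional UNet checkpoint; "google/ddpm-cifar10-32" is one such checkpoint, but any compatible UNet2DModel repository works. `eta=0.0` gives deterministic DDIM sampling, while `eta=1.0` recovers DDPM-like stochasticity:

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator(device="cpu").manual_seed(0)
# eta=0.0 -> deterministic DDIM; fewer steps than DDPM are usually enough.
images = pipe(batch_size=2, generator=generator, eta=0.0, num_inference_steps=50).images
images[0].save("sample.png")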
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Descriptive name chosen for the fixtures directory used by the push-to-hub tests below.
SAMPLE_FIXTURES_DIR = get_tests_dir('fixtures')


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json')
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FIXTURES_DIR)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='test-feature-extractor', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FIXTURES_DIR)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-feature-extractor-org', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FIXTURES_DIR)

        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'})

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""", trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
| 284 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around token_ids and lengths, with sanity checks and filtering."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into several sub-sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 196 | 0 |
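The collate logic above can be isolated into a few lines. A minimal, self-contained sketch of pad-to-longest batching (`pad_idx` is whatever id the tokenizer reserves for padding):

import torch

def pad_batch(token_ids, pad_idx=0):
    # Pad every sequence to the length of the longest one in the batch.
    lengths = [len(t) for t in token_ids]
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)

batch, lengths = pad_batch([[5, 6, 7], [5, 6], [5]])
print(batch.shape)   # torch.Size([3, 3])
print(lengths)       # tensor([3, 2, 1])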
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level keys that should not be ported to the HF checkpoint."""
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    """Rename original Whisper state-dict keys in place using WHISPER_MAPPING."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f'{key} -> {new_key}')

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default for `root` is an assumption made here so that the
    # single-argument call in the conversion function below keeps working.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f'{download_target} exists and is not a regular file')

    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file')

    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.')

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions['n_mels'],
        d_model=dimensions['n_audio_state'],
        max_target_positions=dimensions['n_text_ctx'],
        encoder_layers=dimensions['n_audio_layer'],
        encoder_attention_heads=dimensions['n_audio_head'],
        decoder_layers=dimensions['n_text_layer'],
        decoder_attention_heads=dimensions['n_text_head'],
        max_source_positions=dimensions['n_audio_ctx'],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}')

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 |
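The renaming step in the conversion script boils down to substring substitution over state-dict keys. A tiny standalone demo with a two-entry mapping chosen for illustration:

MAPPING = {"blocks": "layers", ".attn.query": ".self_attn.q_proj"}

def rename(state_dict):
    # Pop each old key and re-insert it under the substituted name.
    for key in list(state_dict):
        new_key = key
        for old, new in MAPPING.items():
            if old in new_key:
                new_key = new_key.replace(old, new)
        state_dict[new_key] = state_dict.pop(key)
    return state_dict

print(rename({"encoder.blocks.0.attn.query.weight": 1}))
# {'encoder.layers.0.self_attn.q_proj.weight': 1}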
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 279 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 153 |
"""simple docstring"""
def solution(n=4_000_000):
    """Sum the even Fibonacci numbers that do not exceed `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 153 | 1 |
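An alternative worth noting: every third Fibonacci number is even, and the even ones satisfy E(k) = 4*E(k-1) + E(k-2), so the odd terms can be skipped entirely. A sketch under that identity:

def solution_even_only(n: int = 4_000_000) -> int:
    # prev, curr start at the first two even Fibonacci numbers: 2 and 8.
    total, prev, curr = 0, 2, 8
    if n >= 2:
        total += 2
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev  # E(k) = 4*E(k-1) + E(k-2)
    return total

assert solution_even_only(10) == 10   # 2 + 8
print(solution_even_only())           # 4613732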
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the dump only preserved the value `False`
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn='gelu-approximate',
            num_embeds_ada_norm=1000,
            norm_type='ada_norm_zero',
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')

        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy')

            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')

        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                F'/dit/{word}_512.npy')

            assert np.abs((expected_image - image).max()) < 1e-1
| 369 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 80 | 0 |
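For reference, the special-token layout the tokenizer above produces is `<s> A </s>` for one sequence and `<s> A </s></s> B </s>` for a pair. A toy illustration with placeholder ids (real values come from the vocabulary):

CLS, SEP = 0, 2  # placeholder ids for <s> and </s>

def with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP, SEP] + ids_b + [SEP]

print(with_special_tokens([10, 11]))         # [0, 10, 11, 2]
print(with_special_tokens([10, 11], [12]))   # [0, 10, 11, 2, 2, 12, 2]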
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 13 |
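A hedged sketch of the server side this client expects: bind the same host/port, accept one connection, and stream a file in 1 KiB chunks. "payload.bin" is a placeholder file name:

import socket

def serve_file(path: str = "payload.bin", port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind((socket.gethostname(), port))
        sock.listen(1)
        conn, addr = sock.accept()
        with conn:
            print(f"Connection from {addr}")
            conn.recv(1024)  # consume the client's greeting
            with open(path, "rb") as in_file:
                while chunk := in_file.read(1024):
                    conn.sendall(chunk)
        print("File sent")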
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 106 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 369 |
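A usage sketch that adds a network timeout, which the helper above omits; the webhook URL is a placeholder obtained from Slack's incoming-webhook setup page:

import requests

def send_with_timeout(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=timeout,
    )
    response.raise_for_status()  # raises requests.HTTPError on non-2xx codes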
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 29 | 0 |
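A minimal instantiation sketch for the configuration above; the arguments shown are the defaults, so this is equivalent to `SegformerConfig()`:

from transformers import SegformerConfig

config = SegformerConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
print(config.num_encoder_blocks)   # 4
print(config.decoder_hidden_size)  # 256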
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"""{split}.source"""), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"""{split}.target"""), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,  # ignored
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated

            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"""too many tokens in {len(failures)} batches""")
    def test_sortish_sampler_reduces_padding(self):
        ds, max_tokens, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad_token_id = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad_token_id).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 105 |
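The dynamic-sampler test above exercises a simple idea: group length-sorted example indices into batches whose padded size (batch length x longest example) stays under a token budget. A hedged standalone sketch of that grouping:

def token_budget_batches(lengths, max_tokens):
    # Sort indices longest-first so each batch's padded cost is easy to bound.
    order = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
    batches, current, longest = [], [], 0
    for idx in order:
        longest = max(longest, lengths[idx])
        if current and longest * (len(current) + 1) > max_tokens:
            batches.append(current)
            current, longest = [], lengths[idx]
        current.append(idx)
    if current:
        batches.append(current)
    return batches

print(token_budget_batches([5, 9, 3, 7], max_tokens=16))  # [[1], [3, 0], [2]]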
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
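
# --- Illustration (assumes tensorflow is installed; shown as comments so the
# test module's import-time behaviour is unchanged) ---
# The integration check above is an elementwise comparison within a tolerance:
# expected = tf.constant([-0.4180, -1.5051, -3.4836])
# tf.debugging.assert_near(outputs.logits[0, :3], expected, atol=1e-4)
# # passes silently; raises InvalidArgumentError if any entry differs by more than atol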
| 224 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
F'Supported model types: {",".join(self.backbones_supported )}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
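
# --- Illustration (assumes `transformers` is installed) ---
# Typical construction paths for this config: rely on the Swin/DETR defaults,
# or combine explicit sub-configs via the classmethod defined above.
# from transformers import MaskFormerConfig, SwinConfig, DetrConfig
# config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
# custom = MaskFormerConfig.from_backbone_and_decoder_configs(
#     backbone_config=SwinConfig(), decoder_config=DetrConfig()
# )
# assert custom.mask_feature_size == 256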
| 169 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
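
# --- Illustration ---
# A sketch of how the menu above is driven (it needs an interactive terminal,
# so it is shown as comments):
# menu = BulletMenu("Choose a mixed precision mode:", ["no", "fp16", "bf16"])
# index = menu.run(default_choice=0)  # blocks until enter, returns the position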
| 169 | 1 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
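
# --- Worked example ---
# For test_grids[0] = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
# the per-row negative counts are 1, 1, 2 and 4, so every strategy returns 8.
# In count_negatives_binary_search the slice bound never grows (3, 3, 2, 0 after
# each row), so later rows are searched over ever-smaller prefixes.
assert count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8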
| 124 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict:
snake_case : Any = parent
snake_case : List[Any] = batch_size
snake_case : List[Any] = seq_length
snake_case : Dict = is_training
snake_case : List[str] = use_input_mask
snake_case : List[str] = use_token_type_ids
snake_case : Dict = use_labels
snake_case : Optional[int] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : Optional[Any] = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Union[str, Any] = intermediate_size
snake_case : List[Any] = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : str = attention_probs_dropout_prob
snake_case : List[Any] = max_position_embeddings
snake_case : List[Any] = type_vocab_size
snake_case : int = type_sequence_label_size
snake_case : Optional[int] = initializer_range
snake_case : Union[str, Any] = num_labels
snake_case : List[str] = num_choices
snake_case : Optional[int] = scope
def UpperCAmelCase ( self ) -> int:
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Union[str, Any] = None
if self.use_input_mask:
snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Tuple = None
if self.use_token_type_ids:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Optional[int] = None
snake_case : str = None
snake_case : List[Any] = None
if self.use_labels:
snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
snake_case : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Any:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> List[str]:
snake_case : Tuple = NystromformerModel(config=A )
model.to(A )
model.eval()
snake_case : str = model(A , attention_mask=A , token_type_ids=A )
snake_case : List[str] = model(A , token_type_ids=A )
snake_case : Any = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Union[str, Any]:
snake_case : List[Any] = NystromformerForMaskedLM(config=A )
model.to(A )
model.eval()
snake_case : List[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Optional[Any]:
snake_case : List[Any] = NystromformerForQuestionAnswering(config=A )
model.to(A )
model.eval()
snake_case : Dict = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Tuple:
snake_case : Optional[int] = self.num_labels
snake_case : Union[str, Any] = NystromformerForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> int:
snake_case : Union[str, Any] = self.num_labels
snake_case : Union[str, Any] = NystromformerForTokenClassification(config=A )
model.to(A )
model.eval()
snake_case : int = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Any:
snake_case : List[Any] = self.num_choices
snake_case : Union[str, Any] = NystromformerForMultipleChoice(config=A )
model.to(A )
model.eval()
snake_case : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : List[str] = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> str:
snake_case : Dict = NystromformerModelTester(self )
snake_case : str = ConfigTester(self , config_class=A , hidden_size=3_7 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Any:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : int = type
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> Dict:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = NystromformerModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
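
# --- Illustration (assumes `torch` + `transformers` with Hub access; mirrors
# the slow test above, shown as comments to avoid heavy work at import time) ---
# tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
# model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
# encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
# token_logits = model(encoding.input_ids).logits
# prediction = token_logits[:, 2, :].argmax(-1)[0]  # position 2 is the [MASK] token
# print(tokenizer.decode(prediction))  # "capital"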
| 124 | 1 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return x with x = r1 (mod n1) and x = r2 (mod n2), via Bezout coefficients."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same problem as above, solved with modular inverses instead."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 350 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` independent trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.7_5))
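
    # --- Worked arithmetic for the call above ---
    # P(X = 2) with trials = 4, prob = 0.75:
    #   C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375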
| 114 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score = number of characters already in the right position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
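
    # --- Illustration ---
    # Fitness is simply the count of matching positions: evaluate("abcd", "abce")
    # returns ("abcd", 3.0), and `basic` stops once the best individual scores a
    # full match with target_str.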
| 61 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a_ :
def __init__( self : Any , lowercase : Optional[int] , lowercase : List[Any]=13 , lowercase : int=10 , lowercase : str=3 , lowercase : List[Any]=2 , lowercase : Dict=2 , lowercase : List[str]=2 , lowercase : int=True , lowercase : List[Any]=True , lowercase : Union[str, Any]=32 , lowercase : Optional[int]=5 , lowercase : List[Any]=4 , lowercase : List[str]=37 , lowercase : Union[str, Any]="gelu" , lowercase : List[Any]=0.1 , lowercase : Any=0.1 , lowercase : Optional[Any]=10 , lowercase : Union[str, Any]=0.02 , lowercase : Optional[int]=0.9 , lowercase : List[str]=None , ):
"""simple docstring"""
lowercase_ :Optional[int] = parent
lowercase_ :str = batch_size
lowercase_ :Optional[int] = image_size
lowercase_ :Tuple = num_channels
lowercase_ :Optional[Any] = patch_size
lowercase_ :List[str] = tubelet_size
lowercase_ :List[Any] = num_frames
lowercase_ :Dict = is_training
lowercase_ :Optional[int] = use_labels
lowercase_ :Optional[int] = hidden_size
lowercase_ :List[str] = num_hidden_layers
lowercase_ :List[str] = num_attention_heads
lowercase_ :int = intermediate_size
lowercase_ :Any = hidden_act
lowercase_ :Tuple = hidden_dropout_prob
lowercase_ :str = attention_probs_dropout_prob
lowercase_ :Any = type_sequence_label_size
lowercase_ :int = initializer_range
lowercase_ :Dict = mask_ratio
lowercase_ :Optional[int] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase_ :str = (image_size // patch_size) ** 2
lowercase_ :Union[str, Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase_ :Optional[Any] = int(mask_ratio * self.seq_length )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ :Union[str, Any] = None
if self.use_labels:
lowercase_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Optional[int] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[str] ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[int] , lowercase : Dict , lowercase : Dict , lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :int = VideoMAEModel(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Optional[int] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : str , lowercase : str , lowercase : List[str] , lowercase : int ):
"""simple docstring"""
lowercase_ :Union[str, Any] = VideoMAEForPreTraining(lowercase )
model.to(lowercase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase_ :Optional[int] = torch.ones((self.num_masks,) )
lowercase_ :List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase_ :Dict = mask.expand(self.batch_size , -1 ).bool()
lowercase_ :str = model(lowercase , lowercase )
# model only returns predictions for masked patches
lowercase_ :Any = mask.sum().item()
lowercase_ :Tuple = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__A = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__A = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :List[Any] = VideoMAEModelTester(self )
lowercase_ :Dict = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def lowercase__ ( self : List[Any] , lowercase : List[str] , lowercase : List[str] , lowercase : List[str]=False ):
"""simple docstring"""
lowercase_ :Tuple = copy.deepcopy(lowercase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase_ :Tuple = torch.ones((self.model_tester.num_masks,) )
lowercase_ :Tuple = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase_ :Optional[int] = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase_ :Dict = bool_masked_pos.to(lowercase )
if return_labels:
if model_class in [
*get_values(lowercase ),
]:
lowercase_ :Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def lowercase__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def lowercase__ ( self : Dict ):
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ , lowercase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Dict = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ :List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ , lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Any = model_class(lowercase )
lowercase_ :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ :List[str] = [*signature.parameters.keys()]
lowercase_ :str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase )
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase )
@slow
def lowercase__ ( self : Dict ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :List[Any] = VideoMAEModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ :Union[str, Any] = True
for model_class in self.all_model_classes:
lowercase_ :Dict = self.model_tester.seq_length - self.model_tester.num_masks
lowercase_ :Optional[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase_ :Union[str, Any] = True
lowercase_ :List[Any] = False
lowercase_ :Optional[int] = True
lowercase_ :Union[str, Any] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowercase_ :List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
lowercase_ :str = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ :Union[str, Any] = True
lowercase_ :Optional[Any] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowercase_ :Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
lowercase_ :Union[str, Any] = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase_ :List[str] = len(lowercase )
# Check attention is always last and order is fine
lowercase_ :Optional[Any] = True
lowercase_ :Dict = True
lowercase_ :Dict = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowercase_ :List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + 1 , len(lowercase ) )
lowercase_ :int = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase__ ( self : int ):
"""simple docstring"""
def check_hidden_states_output(lowercase : Union[str, Any] , lowercase : Dict , lowercase : Any ):
lowercase_ :Any = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowercase_ :Optional[int] = model(**self._prepare_for_class(lowercase , lowercase ) )
lowercase_ :Optional[int] = outputs.hidden_states
lowercase_ :Any = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase ) , lowercase )
lowercase_ :List[str] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase_ :List[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Optional[int] = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ :List[Any] = True
check_hidden_states_output(lowercase , lowercase , lowercase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Any ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
lowercase )
lowercase_ :List[str] = self.default_image_processor
lowercase_ :List[str] = prepare_video()
lowercase_ :int = image_processor(lowercase , return_tensors="pt" ).to(lowercase )
# forward pass
with torch.no_grad():
lowercase_ :Dict = model(**lowercase )
# verify the logits
lowercase_ :Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase )
lowercase_ :int = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) )
@slow
def lowercase__ ( self : str ):
"""simple docstring"""
lowercase_ :List[Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(lowercase )
lowercase_ :Dict = self.default_image_processor
lowercase_ :Union[str, Any] = prepare_video()
lowercase_ :List[str] = image_processor(lowercase , return_tensors="pt" ).to(lowercase )
# add boolean mask, indicating which patches to mask
lowercase_ :int = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
lowercase_ :List[str] = torch.load(lowercase )
# forward pass
with torch.no_grad():
lowercase_ :List[Any] = model(**lowercase )
# verify the logits
lowercase_ :Union[str, Any] = torch.Size([1, 1_408, 1_536] )
lowercase_ :List[Any] = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=lowercase )
self.assertEqual(outputs.logits.shape , lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase_ :Any = torch.tensor([0.51_42] , device=lowercase )
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase_ :Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=lowercase ).to(
lowercase )
with torch.no_grad():
lowercase_ :Tuple = model(**lowercase )
lowercase_ :Optional[Any] = torch.tensor(torch.tensor([0.64_69] ) , device=lowercase )
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4 ) )
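
# --- Illustration (assumes `torch`; mirrors the mask construction used above,
# shown as comments to keep the test module cheap to import) ---
# seq_length, num_masks, batch_size = 8, 3, 2
# mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
# bool_masked_pos = mask.expand(batch_size, -1).bool()  # shape [2, 8]; True marks masked patches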
| 223 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowercase__ : Tuple = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[int] ):
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
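
# --- Illustration ---
# The shim above only emits a deprecation warning and delegates; new code should
# use the image processor directly (checkpoint name here is an example):
# from transformers import FlavaImageProcessor
# processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")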
| 358 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowercase__ : int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowercase__ : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ : int = []
lowercase__ : Dict = []
def __lowercase ( _a , _a , _a , _a , _a ):
for attribute in key.split('''.''' ):
snake_case_ : Optional[Any] = getattr(_a , _a )
if weight_type is not None:
snake_case_ : Union[str, Any] = getattr(_a , _a ).shape
else:
snake_case_ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
snake_case_ : Dict = value
elif weight_type == "weight_g":
snake_case_ : List[Any] = value
elif weight_type == "weight_v":
snake_case_ : List[Any] = value
elif weight_type == "bias":
snake_case_ : Optional[Any] = value
elif weight_type == "running_mean":
snake_case_ : str = value
elif weight_type == "running_var":
snake_case_ : List[Any] = value
elif weight_type == "num_batches_tracked":
snake_case_ : Tuple = value
elif weight_type == "weight_ih_l0":
snake_case_ : Dict = value
elif weight_type == "weight_hh_l0":
snake_case_ : str = value
elif weight_type == "bias_ih_l0":
snake_case_ : str = value
elif weight_type == "bias_hh_l0":
snake_case_ : Dict = value
elif weight_type == "weight_ih_l1":
snake_case_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
snake_case_ : Dict = value
elif weight_type == "bias_ih_l1":
snake_case_ : List[str] = value
elif weight_type == "bias_hh_l1":
snake_case_ : Optional[int] = value
else:
snake_case_ : Dict = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def __lowercase ( _a , _a ):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case_, snake_case_ : Tuple = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
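# Worked example of the matching above (hypothetical key/pattern pair):
# should_ignore("encoder.model.0.conv.conv.weight", ["encoder.model.*"]) returns True,
# since the pattern ends with ".*" and the name starts with "encoder.model".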
def __lowercase ( _a , _a , _a ):
snake_case_ : str = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
snake_case_ : Any = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case_ : int = MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(_a , _a ):
logger.info(f"{name} was ignored" )
continue
snake_case_ : Optional[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case_, snake_case_ : List[Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
snake_case_ : Any = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
snake_case_ : str = True
if "*" in mapped_key:
snake_case_ : Optional[Any] = name.split(_a )[0].split('''.''' )[-2]
snake_case_ : str = mapped_key.replace('''*''' , _a )
if "weight_g" in name:
snake_case_ : int = '''weight_g'''
elif "weight_v" in name:
snake_case_ : List[str] = '''weight_v'''
elif "weight_ih_l0" in name:
snake_case_ : List[Any] = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
snake_case_ : Tuple = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
snake_case_ : Any = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
snake_case_ : Dict = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
snake_case_ : str = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
snake_case_ : List[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
snake_case_ : List[Any] = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
snake_case_ : List[Any] = '''bias_hh_l1'''
elif "bias" in name:
snake_case_ : Optional[int] = '''bias'''
elif "weight" in name:
snake_case_ : str = '''weight'''
elif "running_mean" in name:
snake_case_ : Optional[int] = '''running_mean'''
elif "running_var" in name:
snake_case_ : int = '''running_var'''
elif "num_batches_tracked" in name:
snake_case_ : Optional[int] = '''num_batches_tracked'''
else:
snake_case_ : Optional[Any] = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def __lowercase ( _a , _a , _a , _a=None , _a=None , ):
if config_path is not None:
snake_case_ : Optional[int] = EncodecConfig.from_pretrained(_a )
else:
snake_case_ : str = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case_ : Union[str, Any] = [8, 5, 4, 4]
snake_case_ : Optional[int] = [2.2]
snake_case_ : Any = 64
snake_case_ : Dict = 32_000
snake_case_ : int = 2_048
snake_case_ : int = False
snake_case_ : Optional[int] = False
snake_case_ : Optional[int] = False
elif model_name == "encodec_48khz":
snake_case_ : List[str] = [8, 5, 4, 2]
snake_case_ : List[Any] = [3.0, 6.0, 12.0, 24.0]
snake_case_ : Any = 48_000
snake_case_ : List[str] = 2
snake_case_ : int = False
snake_case_ : str = '''time_group_norm'''
snake_case_ : int = True
snake_case_ : List[str] = 1.0
snake_case_ : Tuple = 0.01
else:
raise ValueError(f"Unknown model name: {model_name}" )
snake_case_ : Any = EncodecModel(_a )
snake_case_ : str = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_a )
snake_case_ : Optional[Any] = torch.load(_a )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case_ : Union[str, Any] = original_checkpoint['''best_state''']
recursively_load_weights(_a , _a , _a )
model.save_pretrained(_a )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(_a )
model.push_to_hub(_a )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
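# Hedged usage sketch (script filename and paths are hypothetical; requires an
# original EnCodec checkpoint on disk):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf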
| 155 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Optional[Any] = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_A : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
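# Note: the `_LazyModule` indirection defers importing the torch-heavy modeling code
# until one of the exported attributes is actually accessed, which keeps the top-level
# package import fast even when torch is installed.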
| 202 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A : List[Any] = logging.get_logger(__name__)
class a__ ( a_ ):
__lowerCAmelCase = ["""pixel_values"""]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = True , **_a , ):
super().__init__(**_a )
lowercase : Optional[Any] = size if size is not None else {"shortest_edge": 224}
lowercase : List[Any] = get_size_dict(_a , default_to_square=_a )
lowercase : str = crop_size if crop_size is not None else {"height": 256, "width": 256}
lowercase : List[str] = get_size_dict(_a , param_name="crop_size" )
lowercase : int = do_resize
lowercase : Optional[int] = size
lowercase : str = resample
lowercase : List[Any] = do_rescale
lowercase : Union[str, Any] = rescale_factor
lowercase : Optional[int] = do_center_crop
lowercase : Union[str, Any] = crop_size
lowercase : Optional[Any] = do_flip_channel_order
def __magic_name__ ( self , _a , _a , _a = PIL.Image.BILINEAR , _a = None , **_a , ):
lowercase : List[Any] = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase : Union[str, Any] = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __magic_name__ ( self , _a , _a , _a = None , **_a , ):
lowercase : str = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_a , size=(size["height"], size["width"]) , data_format=_a , **_a )
def __magic_name__ ( self , _a , _a , _a = None , **_a , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def __magic_name__ ( self , _a , _a = None ):
return flip_channel_order(_a , data_format=_a )
def __magic_name__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase : Tuple = resample if resample is not None else self.resample
lowercase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Optional[int] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_a , default_to_square=_a )
lowercase : int = crop_size if crop_size is not None else self.crop_size
lowercase : Any = get_size_dict(_a , param_name="crop_size" )
lowercase : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
lowercase : Any = [to_numpy_array(_a ) for image in images]
if do_resize:
lowercase : Optional[int] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
lowercase : str = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
lowercase : Union[str, Any] = [self.rescale(image=_a , scale=_a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase : int = [self.flip_channel_order(image=_a ) for image in images]
lowercase : int = [to_channel_dimension_format(_a , _a ) for image in images]
lowercase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=_a , tensor_type=_a )
def __magic_name__ ( self , _a , _a = None ):
lowercase : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_a ) != len(_a ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(_a ):
lowercase : Tuple = target_sizes.numpy()
lowercase : List[Any] = []
for idx in range(len(_a ) ):
lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_a )
lowercase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_a )
else:
lowercase : str = logits.argmax(dim=1 )
lowercase : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
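# Minimal usage sketch (the processor class above carries an obfuscated name here; the
# call pattern assumes the standard image-processor interface, and `image` is a
# hypothetical PIL image):
#   processor = <ImageProcessorClass>(size={"shortest_edge": 224}, crop_size={"height": 256, "width": 256})
#   batch = processor(images=image, return_tensors="pt")
#   # batch["pixel_values"] is a (1, 3, 256, 256) float tensor given the defaults above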
| 202 | 1 |
"""simple docstring"""
import string
def lowercase__ ( snake_case_ :str ):
__UpperCAmelCase = ''''''
for i in sequence:
__UpperCAmelCase = ord(UpperCamelCase__ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def lowercase__ ( snake_case_ :str ):
__UpperCAmelCase = string.ascii_letters
__UpperCAmelCase = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(UpperCamelCase__ )] if c in letters else c for c in sequence )
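# e.g. both variants map "Hello" -> "Svool": 'H' (ASCII 72) becomes chr(155 - 72) == 'S',
# and lowercase letters map via chr(219 - ord(c)).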
def lowercase__ ( ):
from timeit import timeit
print('''Running performance benchmarks...''' )
__UpperCAmelCase = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=UpperCamelCase__ )} seconds''' )
print(F'''> atbash(): {timeit("atbash(printable)" , setup=UpperCamelCase__ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 352 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_lowerCAmelCase ):
a__ : Union[str, Any] = ["onnx"]
def __init__( self : Any , *_lowercase : Dict , **_lowercase : Any ):
requires_backends(self , ['''onnx'''] )
@classmethod
def a ( cls : str , *_lowercase : List[Any] , **_lowercase : int ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def a ( cls : Union[str, Any] , *_lowercase : List[str] , **_lowercase : Optional[int] ):
requires_backends(cls , ['''onnx'''] )
| 86 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def snake_case_ ():
UpperCAmelCase = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 2_0, '''a ''' * 3_0, '''b ''' * 7],
}
UpperCAmelCase = Dataset.from_dict(_a )
return dataset
class _a ( __a ):
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = get_dataset()
UpperCAmelCase = make_duplicate_clusters(lowercase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = get_dataset()
UpperCAmelCase , UpperCAmelCase = deduplicate_dataset(lowercase )
self.assertEqual(len(lowercase ) , 2 )
print(lowercase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , lowercase )
| 34 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
if config_name_or_path is None:
lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCamelCase : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : Any = question_encoder_name_or_path
lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
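# Hedged usage sketch (script filename is hypothetical; the model identifiers are
# standard Hugging Face Hub ids):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint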
| 48 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , A_ , A_=13 , A_=32 , A_=3 , A_=4 , A_=[10, 20, 30, 40] , A_=[2, 2, 3, 2] , A_=True , A_=True , A_=37 , A_="gelu" , A_=10 , A_=0.02 , A_=["stage2", "stage3", "stage4"] , A_=[2, 3, 4] , A_=None , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = num_stages
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = scope
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = ConvNextVaModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = ConvNextVaForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = ConvNextVaBackbone(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = ConvNextVaBackbone(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowercase : int = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowercase : List[Any] = False
__lowercase : int = False
__lowercase : Optional[Any] = False
__lowercase : List[str] = False
__lowercase : str = False
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = ConvNextVaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCamelCase = True
if model_class.__name__ in [
*get_values(A_ ),
*get_values(A_ ),
]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCamelCase = False
UpperCamelCase = True
if (
model_class.__name__
in [*get_values(A_ ), *get_values(A_ )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ConvNextVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A ( ) -> Any:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = preprocessor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = torch.tensor([0.9996, 0.1966, -0.4386] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 110 |
from __future__ import annotations
class lowercase :
def __init__( self , A_ , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = text, pattern
UpperCamelCase , UpperCamelCase = len(A_ ), len(A_ )
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __UpperCamelCase ( self ) -> list[int]:
"""simple docstring"""
# searches pattern in text and returns index positions
UpperCamelCase = []
for i in range(self.textLen - self.patLen + 1 ):
UpperCamelCase = self.mismatch_in_text(A_ )
if mismatch_index == -1:
positions.append(A_ )
else:
UpperCamelCase = self.match_in_pattern(self.text[mismatch_index] )
UpperCamelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_UpperCAmelCase : Union[str, Any] = "ABAABA"
_UpperCAmelCase : Any = "AB"
_UpperCAmelCase : Dict = BoyerMooreSearch(text, pattern)
_UpperCAmelCase : Optional[int] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
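# Expected output for the demo above: "AB" occurs at indices 0 and 3 of "ABAABA", so
# this prints "Pattern found in following positions:" followed by [0, 3].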
| 110 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 1
@register_to_config
def __init__( self : Any , _lowerCAmelCase : int = 2_000 , _lowerCAmelCase : float = 0.15 , _lowerCAmelCase : float = 0.01 , _lowerCAmelCase : float = 1348.0 , _lowerCAmelCase : float = 1E-5 , _lowerCAmelCase : int = 1 , ):
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE_ = sigma_max
# setable values
SCREAMING_SNAKE_CASE_ = None
self.set_sigmas(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : Optional[int] = None ):
return sample
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : float = None , _lowerCAmelCase : Union[str, torch.device] = None ):
SCREAMING_SNAKE_CASE_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
SCREAMING_SNAKE_CASE_ = torch.linspace(1 , _lowerCAmelCase , _lowerCAmelCase , device=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : float = None , _lowerCAmelCase : float = None , _lowerCAmelCase : float = None ):
SCREAMING_SNAKE_CASE_ = sigma_min if sigma_min is not None else self.config.sigma_min
SCREAMING_SNAKE_CASE_ = sigma_max if sigma_max is not None else self.config.sigma_max
SCREAMING_SNAKE_CASE_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
SCREAMING_SNAKE_CASE_ = torch.exp(torch.linspace(math.log(_lowerCAmelCase ) , math.log(_lowerCAmelCase ) , _lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : bool = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
SCREAMING_SNAKE_CASE_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
SCREAMING_SNAKE_CASE_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
SCREAMING_SNAKE_CASE_ = timesteps.to(self.discrete_sigmas.device )
SCREAMING_SNAKE_CASE_ = self.discrete_sigmas[timesteps].to(sample.device )
SCREAMING_SNAKE_CASE_ = self.get_adjacent_sigma(_lowerCAmelCase , _lowerCAmelCase ).to(sample.device )
SCREAMING_SNAKE_CASE_ = torch.zeros_like(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
SCREAMING_SNAKE_CASE_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE_ = diffusion.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
SCREAMING_SNAKE_CASE_ = randn_tensor(
sample.shape , layout=sample.layout , generator=_lowerCAmelCase , device=sample.device , dtype=sample.dtype )
SCREAMING_SNAKE_CASE_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
SCREAMING_SNAKE_CASE_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_lowerCAmelCase , prev_sample_mean=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : bool = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
SCREAMING_SNAKE_CASE_ = randn_tensor(sample.shape , layout=sample.layout , generator=_lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
SCREAMING_SNAKE_CASE_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
SCREAMING_SNAKE_CASE_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
SCREAMING_SNAKE_CASE_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE_ = step_size.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = sample + step_size * model_output
SCREAMING_SNAKE_CASE_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
SCREAMING_SNAKE_CASE_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_lowerCAmelCase ) * sigmas[:, None, None, None]
)
SCREAMING_SNAKE_CASE_ = noise + original_samples
return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
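# Note: the output dataclass above is SdeVeOutput, i.e. this file implements the
# variance-exploding SDE scheduler (exposed upstream in diffusers as
# `ScoreSdeVeScheduler`). Hedged usage sketch, assuming the upstream class name:
#   scheduler = ScoreSdeVeScheduler()           # registered defaults from __init__ above
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)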
| 225 |
from __future__ import annotations
lowerCamelCase__ : Optional[int] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCamelCase__ : List[Any] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def UpperCAmelCase_ ( __UpperCAmelCase : list[float] ) -> list[float]:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
for i in range(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = -1
for j in range(i + 1 , __UpperCAmelCase ):
if arr[i] < arr[j]:
SCREAMING_SNAKE_CASE_ = arr[j]
break
result.append(__UpperCAmelCase )
return result
def UpperCAmelCase_ ( __UpperCAmelCase : list[float] ) -> list[float]:
SCREAMING_SNAKE_CASE_ = []
for i, outer in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = -1
for inner in arr[i + 1 :]:
if outer < inner:
SCREAMING_SNAKE_CASE_ = inner
break
result.append(__UpperCAmelCase )
return result
def UpperCAmelCase_ ( __UpperCAmelCase : list[float] ) -> list[float]:
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = [-1] * arr_size
for index in reversed(range(__UpperCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
        if stack:
            SCREAMING_SNAKE_CASE_ = stack[-1]
        stack.append(arr[index] )
return result
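# Complexity note: the two nested-loop variants above are O(n^2) in the worst case,
# while this stack-based pass pushes and pops each element at most once, giving O(n).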
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCamelCase__ : List[str] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 225 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = torch.device('''cpu''')
def __A ( ) -> Any:
__a : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a : Dict = Image.open(requests.get(a_ , stream=a_).raw)
return im
def __A ( a_ :List[str]) -> List[Any]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01])
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01])
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02])
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02])
def __A ( a_ :str , a_ :Optional[int] , a_ :str) -> List[Any]:
__a : Union[str, Any] = dct.pop(a_)
__a : List[Any] = val
def __A ( a_ :str) -> List[Any]:
__a : List[str] = []
for k in state_dict.keys():
__a : Optional[Any] = k
if ".pwconv" in k:
__a : List[str] = k_new.replace('''.pwconv''' , '''.point_wise_conv''')
if ".dwconv" in k:
__a : int = k_new.replace('''.dwconv''' , '''.depth_wise_conv''')
if ".Proj." in k:
__a : Union[str, Any] = k_new.replace('''.Proj.''' , '''.proj.''')
if "patch_embed" in k_new:
__a : str = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''')
if "network" in k_new:
__a : int = k_new.split('''.''')
if ls[2].isdigit():
__a : Optional[int] = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:])
else:
__a : Any = k_new.replace('''network''' , '''swiftformer.encoder.network''')
rename_keys.append((k, k_new))
return rename_keys
@torch.no_grad()
def __A ( a_ :Optional[Any] , a_ :Union[str, Any] , a_ :str) -> Dict:
__a : Tuple = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__a : Optional[Any] = 10_00
__a : str = '''huggingface/label-files'''
__a : List[Any] = '''imagenet-1k-id2label.json'''
__a : Tuple = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
__a : Optional[int] = {int(a_): v for k, v in idalabel.items()}
__a : List[Any] = idalabel
__a : int = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__a : str = [3, 3, 6, 4]
__a : Union[str, Any] = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
__a : str = [3, 3, 9, 6]
__a : Dict = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
__a : int = [4, 3, 10, 5]
__a : str = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
__a : Optional[Any] = [4, 4, 12, 6]
__a : Tuple = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https'''):
__a : Union[str, Any] = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''' , check_hash=a_)
else:
__a : Tuple = torch.load(a_ , map_location='''cpu''')
__a : Union[str, Any] = checkpoint
__a : Any = create_rename_keys(a_)
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a_ , a_ , a_)
# load HuggingFace model
__a : Dict = SwiftFormerForImageClassification(a_).eval()
hf_model.load_state_dict(a_)
# prepare test inputs
__a : Optional[Any] = prepare_img()
__a : Dict = ViTImageProcessor.from_pretrained('''preprocessor_config''')
__a : int = processor(images=a_ , return_tensors='''pt''')
# compare outputs from both models
__a : List[str] = get_expected_output(a_)
__a : str = hf_model(inputs['''pixel_values''']).logits
assert hf_logits.shape == torch.Size([1, 10_00])
assert torch.allclose(hf_logits[0, 0:5] , a_ , atol=1e-3)
Path(a_).mkdir(exist_ok=a_)
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
hf_model.save_pretrained(a_)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
A = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
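# Hedged usage sketch (script filename and checkpoint path are hypothetical):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./swiftformer_xs_hf \
#       --original_ckpt ./swiftformer_xs.pth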
| 188 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
A = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def __A ( a_ :List[Any]) -> List[Any]:
__a : List[Any] = {}
state_dict.pop('''pixel_mean''' , a_)
state_dict.pop('''pixel_std''' , a_)
__a : List[Any] = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__a : int = key.replace(a_ , a_)
if re.match(a_ , a_):
__a : Optional[Any] = int(re.match(a_ , a_).group(2))
if layer_nb == 0:
__a : Any = key.replace('''layers.0''' , '''proj_in''')
elif layer_nb == 1:
__a : Dict = key.replace('''layers.1''' , '''layers.0''')
elif layer_nb == 2:
__a : Optional[int] = key.replace('''layers.2''' , '''proj_out''')
__a : int = value
__a : Union[str, Any] = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def __A ( a_ :Optional[int] , a_ :Optional[Any] , a_ :Dict , a_ :Optional[int]="ybelkada/segment-anything") -> Dict:
__a : Dict = hf_hub_download(a_ , F"""checkpoints/{model_name}.pth""")
if "sam_vit_b" in model_name:
__a : List[str] = SamConfig()
elif "sam_vit_l" in model_name:
__a : List[Any] = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__a : List[Any] = SamConfig(
vision_config=a_ , )
elif "sam_vit_h" in model_name:
__a : List[str] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__a : Optional[int] = SamConfig(
vision_config=a_ , )
__a : int = torch.load(a_ , map_location='''cpu''')
__a : Tuple = replace_keys(a_)
__a : Optional[int] = SamImageProcessor()
__a : Any = SamProcessor(image_processor=a_)
__a : Any = SamModel(a_)
hf_model.load_state_dict(a_)
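    # Note: the sanity checks below run the converted model on GPU; a CUDA device is required.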
__a : Dict = hf_model.to('''cuda''')
__a : Tuple = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
__a : str = Image.open(requests.get(a_ , stream=a_).raw).convert('''RGB''')
__a : Tuple = [[[4_00, 6_50]]]
__a : Tuple = [[1]]
__a : Tuple = processor(images=np.array(a_) , return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
__a : str = hf_model(**a_)
__a : Optional[int] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
__a : Any = processor(
images=np.array(a_) , input_points=a_ , input_labels=a_ , return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
__a : Optional[int] = hf_model(**a_)
__a : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
__a : str = ((75, 2_75, 17_25, 8_50),)
__a : List[str] = processor(images=np.array(a_) , input_boxes=a_ , return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
__a : Any = hf_model(**a_)
__a : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
__a : int = [[[4_00, 6_50], [8_00, 6_50]]]
__a : Dict = [[1, 1]]
__a : Optional[Any] = processor(
images=np.array(a_) , input_points=a_ , input_labels=a_ , return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
__a : int = hf_model(**a_)
__a : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
A = argparse.ArgumentParser()
A = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
A = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 188 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_snake_case : str = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_snake_case : List[str] = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_snake_case : Union[str, Any] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A__ ( UpperCamelCase , UpperCamelCase ):
return float((preds == labels).mean() )
def A__ ( UpperCamelCase , UpperCamelCase ):
A = simple_accuracy(UpperCamelCase , UpperCamelCase )
A = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A__ ( UpperCamelCase , UpperCamelCase ):
A = float(pearsonr(UpperCamelCase , UpperCamelCase )[0] )
A = float(spearmanr(UpperCamelCase , UpperCamelCase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
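# e.g. pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 2.0, 4.0]) -> {"pearson": 1.0, "spearmanr": 1.0}
# (a perfectly linear, monotonically increasing relationship maximizes both correlations)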
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase ( self :Union[str, Any] ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 292 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown(self) -> None:
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self) -> None:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_2(self) -> None:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self) -> None:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
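# These integration checks are gated behind @slow and @require_torch_gpu; in the
# diffusers test suite they would typically be enabled by setting RUN_SLOW=1 on a
# CUDA machine (invocation assumed, not part of this file).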
| 222 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def perceiver_tokenizer(self):
        """simple docstring"""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        """simple docstring"""
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_multibytes_char(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text )
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "[CLS]Unicode €.[SEP]" )
        encoded = tokenizer("e è é ê ë" )
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
    def test_prepare_batch_integration(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )
    def test_empty_target_text(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertNotIn("decoder_input_ids" , batch )
        self.assertNotIn("decoder_attention_mask" , batch )
    def test_max_length_integration(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding="max_length" , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
    def test_save_and_load_tokenizer(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """simple docstring"""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f"""<extra_id_{i}>""" for i in range(125 )]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def test_decode_single_bytes(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , "�" )
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        """simple docstring"""
        pass
    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        """simple docstring"""
        pass
    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass
    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        """simple docstring"""
        pass
    def test_convert_tokens_to_string_format(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
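# A minimal round-trip sketch, consistent with the assertions above (requires
# access to the "deepmind/language-perceiver" checkpoint on the Hub):
#   tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   ids = tok("Unicode €.")["input_ids"]   # byte-level ids offset by the special tokens
#   tok.decode(ids)                        # -> "[CLS]Unicode €.[SEP]"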
| 133 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results(output_dir: str):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results
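# e.g. get_results("/tmp/run_output") -> {"eval_accuracy": 0.8, ...}
# (path and keys illustrative; each example script writes all_results.json itself)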
def is_cuda_and_apex_available():
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus ):
    """simple docstring"""
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        """simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 28 )
        self.assertGreaterEqual(result["eval_exact"] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_rouge1"] , 10 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_bleu"] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "translation_no_trainer" ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "image_classification_no_trainer" ) ) )
| 133 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config ).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'''Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids''' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='pt' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
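# The entity_vocab.tsv layout assumed by load_entity_vocab above is one
# tab-separated record per line, e.g.:
#   [MASK]\t<count>
#   Ana Ivanovic\t<count>
# where the line index becomes the entity id.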
if __name__ == "__main__":
a_ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
a_ :str = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
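# A plausible shell invocation (file and folder names are placeholders, not taken from this script):
#   python convert_luke_checkpoint.py --checkpoint_path luke.bin \
#       --metadata_path metadata.json --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base --model_size base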
| 277 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
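# With the lazy module installed in sys.modules, a plain
#   from transformers.models.longt5 import LongT5Config
# only imports the configuration submodule on first attribute access
# (sketch of the intended usage of this init pattern).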
| 277 | 1 |
def solution() -> int:
    return [
        a * b * (1_000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
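# For reference, the only such triple is (a, b, c) = (200, 375, 425):
# 200 + 375 + 425 == 1_000 and 200**2 + 375**2 == 425**2, so the product is 31_875_000.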
if __name__ == "__main__":
print(f"{solution() = }")
| 354 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth: int = 10_001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
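# Sanity check: solution() returns 104_743, the 10_001st prime (Project Euler #7).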
if __name__ == "__main__":
print(f"{solution() = }")
| 245 | 0 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        error_message = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(error_message )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode(encoded_data) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        error_message = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(error_message )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
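# Round-trip sanity check (agrees with Python's built-in base64 module):
#   base64_encode(b"pytest")  -> b"cHl0ZXN0"
#   base64_decode("cHl0ZXN0") -> b"pytest"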
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline(Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            # Accept a (list of) dict(s) with "image" and "question" keys as-is
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 29 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> None:
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned) -> None:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def convert_config(model, is_finetuned):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.feat_proj_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True) -> None:
    """simple docstring"""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0], is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True )
            with open(vocab_path, "w", encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices, vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model, hf_model, is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
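# A plausible shell invocation (file names are placeholders, not taken from this script):
#   python convert_sew_checkpoint.py --checkpoint_path sew.pt --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-ctc --is_finetuned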
| 310 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos, (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
class VivitImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs )
    def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs )
    def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs )
    def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample )
        if do_center_crop:
            image = self.center_crop(image, size=crop_size )
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset )
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std )
        image = to_channel_dimension_format(image, data_format )
        return image
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> BatchFeature:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
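# --- Added sketch (hedged): the rescale-with-offset idea used above, restated in plain
# NumPy as a standalone check (np is already imported in this module). A scale of 1/255
# maps uint8 frames to [0, 1] and a 0.5 shift recenters them near zero; the helper name
# and the exact [-0.5, 0.5] target are illustrative assumptions, not the library code.
def _demo_rescale_with_offset() -> None:
    frame = np.array([[0, 128, 255]], dtype=np.uint8)
    scaled = frame.astype(np.float32) * (1 / 255)  # [0, 255] -> [0, 1]
    shifted = scaled - 0.5                         # [0, 1] -> [-0.5, 0.5]
    assert float(shifted.min()) >= -0.5 and float(shifted.max()) <= 0.5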
| 310 | 1 |
def solution( limit : int = 5_0_0_0_0_0_0_0 ):
    ret : set = set()
    prime_square_limit : int = int((limit - 2_4) ** (1 / 2) )
    primes : set = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in primes:
        square : int = primea * primea
        for primeb in primes:
            cube : int = primeb * primeb * primeb
            if square + cube >= limit - 1_6:
                break
            for primec in primes:
                tetr : int = primec * primec * primec * primec
                total : int = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 216 |
def harmonic_series( n_term : str ):
    if n_term == "":
        return []
    series : list = []
    for temp in range(int(n_term ) ):
        series.append(f"1/{temp + 1}" if series else '''1''' )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
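    # --- Added sketch (hedged): evaluating the partial sum numerically instead of
    # returning the symbolic terms; `harmonic_sum` is a name introduced here.
    def harmonic_sum(n: int) -> float:
        return sum(1 / k for k in range(1, n + 1))
    assert abs(harmonic_sum(4) - (1 + 1 / 2 + 1 / 3 + 1 / 4)) < 1e-12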
| 216 | 1 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler : Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def lowercase__ ( )-> Optional[int]:
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def lowercase__ ( )-> str:
return __name__.split(""".""" )[0]
def lowercase__ ( )-> logging.Logger:
return logging.getLogger(_get_library_name() )
def lowercase__ ( )-> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
        _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def lowercase__ ( )-> None:
global _default_handler
with _lock:
if not _default_handler:
return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def lowercase__ ( )-> Tuple:
return log_levels
def lowercase__ ( __UpperCamelCase = None )-> logging.Logger:
    if __UpperCamelCase is None:
        __UpperCamelCase = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__UpperCamelCase )
def lowercase__ ( )-> int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def lowercase__ ( __UpperCamelCase )-> None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(__UpperCamelCase )
def lowercase__ ( )-> Tuple:
    return set_verbosity(INFO )
def lowercase__ ( )-> Union[str, Any]:
    return set_verbosity(WARNING )
def lowercase__ ( )-> Optional[int]:
    return set_verbosity(DEBUG )
def lowercase__ ( )-> Tuple:
    return set_verbosity(ERROR )
def lowercase__ ( )-> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def lowercase__ ( )-> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def lowercase__ ( __UpperCamelCase )-> None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__UpperCamelCase )
def lowercase__ ( __UpperCamelCase )-> None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__UpperCamelCase )
def lowercase__ ( )-> None:
_configure_library_root_logger()
    _get_library_root_logger().propagate = False
def lowercase__ ( )-> None:
_configure_library_root_logger()
    _get_library_root_logger().propagate = True
def lowercase__ ( )-> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
        handler.setFormatter(formatter )
def lowercase__ ( )-> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def lowercase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Tuple:
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , False )
    if no_advisory_warnings:
return
self.warning(*__UpperCamelCase , **__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = warning_advice
@functools.lru_cache(None )
def lowercase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
self.warning(*__UpperCamelCase , **__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = warning_once
class a_ :
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: # pylint: disable=unused-argument
"""simple docstring"""
        self._iterator = args[0] if args else None
def __iter__( self ) -> List[Any]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
def empty_fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Dict:
"""simple docstring"""
return self
def __exit__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return
class a_ :
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
else:
return EmptyTqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
        self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def lowercase__ ( )-> bool:
global _tqdm_active
return bool(_tqdm_active )
def lowercase__ ( )-> Optional[Any]:
global _tqdm_active
    _tqdm_active = True
hf_hub_utils.enable_progress_bars()
def lowercase__ ( )-> str:
global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars()
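# --- Added sketch (hedged): the handler wiring above, restated with the plain stdlib so
# it reads in isolation. This mirrors the configure step: one StreamHandler on a private
# library root logger, propagation off, children inheriting level and handler. The
# "mylib" names are illustrative.
def _demo_root_logger_setup() -> logging.Logger:
    root = logging.getLogger("mylib")
    if not root.handlers:  # configure only once
        root.addHandler(logging.StreamHandler(sys.stderr))
        root.setLevel(logging.WARNING)
        root.propagate = False  # don't bubble records up to the global root logger
    return logging.getLogger("mylib.module")  # child inherits handler and level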
| 183 |
'''simple docstring'''
from PIL import Image
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Image:
def brightness(__UpperCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(__UpperCamelCase )
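# --- Added sketch (hedged): the same Image.point pattern applied to contrast instead of
# brightness; `change_contrast` and its factor formula are illustrative additions, not
# part of the original file (PIL's Image is already imported above).
def change_contrast(img: Image.Image, level: float) -> Image.Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> float:
        return 128 + factor * (c - 128)
    return img.point(contrast)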
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
SCREAMING_SNAKE_CASE__ = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 183 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = SwinConfig()
__UpperCamelCase = swin_name.split("""_""" )
__UpperCamelCase = name_split[1]
__UpperCamelCase = int(name_split[4] )
__UpperCamelCase = int(name_split[3][-1] )
if model_size == "tiny":
__UpperCamelCase = 96
__UpperCamelCase = (2, 2, 6, 2)
__UpperCamelCase = (3, 6, 12, 24)
elif model_size == "small":
__UpperCamelCase = 96
__UpperCamelCase = (2, 2, 18, 2)
__UpperCamelCase = (3, 6, 12, 24)
elif model_size == "base":
__UpperCamelCase = 128
__UpperCamelCase = (2, 2, 18, 2)
__UpperCamelCase = (4, 8, 16, 32)
else:
__UpperCamelCase = 192
__UpperCamelCase = (2, 2, 18, 2)
__UpperCamelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
__UpperCamelCase = 21_841
else:
__UpperCamelCase = 1_000
__UpperCamelCase = "huggingface/label-files"
__UpperCamelCase = "imagenet-1k-id2label.json"
__UpperCamelCase = json.load(open(hf_hub_download(A_ ,A_ ,repo_type="""dataset""" ) ,"""r""" ) )
__UpperCamelCase = {int(A_ ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
__UpperCamelCase = img_size
__UpperCamelCase = num_classes
__UpperCamelCase = embed_dim
__UpperCamelCase = depths
__UpperCamelCase = num_heads
__UpperCamelCase = window_size
return config
def _lowercase ( __A ):
'''simple docstring'''
if "patch_embed.proj" in name:
__UpperCamelCase = name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__UpperCamelCase = name.replace("""patch_embed.norm""" ,"""embeddings.norm""" )
if "layers" in name:
__UpperCamelCase = "encoder." + name
if "attn.proj" in name:
__UpperCamelCase = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name:
__UpperCamelCase = name.replace("""attn""" ,"""attention.self""" )
if "norm1" in name:
__UpperCamelCase = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
__UpperCamelCase = name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
__UpperCamelCase = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
__UpperCamelCase = name.replace("""mlp.fc2""" ,"""output.dense""" )
if name == "norm.weight":
__UpperCamelCase = "layernorm.weight"
if name == "norm.bias":
__UpperCamelCase = "layernorm.bias"
if "head" in name:
__UpperCamelCase = name.replace("""head""" ,"""classifier""" )
else:
__UpperCamelCase = "swin." + name
return name
def _lowercase ( __A ,__A ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__UpperCamelCase = orig_state_dict.pop(A_ )
if "mask" in key:
continue
elif "qkv" in key:
__UpperCamelCase = key.split(""".""" )
__UpperCamelCase = int(key_split[1] )
__UpperCamelCase = int(key_split[3] )
__UpperCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__UpperCamelCase = val[:dim, :]
__UpperCamelCase = val[
dim : dim * 2, :
]
__UpperCamelCase = val[-dim:, :]
else:
__UpperCamelCase = val[
:dim
]
__UpperCamelCase = val[
dim : dim * 2
]
__UpperCamelCase = val[
-dim:
]
else:
__UpperCamelCase = val
return orig_state_dict
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = timm.create_model(A_ ,pretrained=A_ )
timm_model.eval()
__UpperCamelCase = get_swin_config(A_ )
__UpperCamelCase = SwinForImageClassification(A_ )
model.eval()
__UpperCamelCase = convert_state_dict(timm_model.state_dict() ,A_ )
model.load_state_dict(A_ )
__UpperCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCamelCase = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" ,"""-""" ) ) )
__UpperCamelCase = Image.open(requests.get(A_ ,stream=A_ ).raw )
__UpperCamelCase = image_processor(images=A_ ,return_tensors="""pt""" )
__UpperCamelCase = timm_model(inputs["""pixel_values"""] )
__UpperCamelCase = model(**A_ ).logits
assert torch.allclose(A_ ,A_ ,atol=1E-3 )
print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(A_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(A_ )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
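    # --- Added sketch (hedged): the qkv slicing performed in convert_state_dict above,
    # on a dummy tensor. A fused attention weight of shape (3 * dim, dim) splits
    # row-wise into query / key / value blocks of shape (dim, dim) each.
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)  # the split is lossless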
| 349 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
        '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''YolosForObjectDetection''',
        '''YolosModel''',
        '''YolosPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
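# --- Added sketch (hedged): the lazy-import idea behind _LazyModule, in miniature and
# self-contained (a real _LazyModule swaps itself into sys.modules; the names below are
# illustrative only).
import importlib
class _MiniLazyModule:
    def __init__(self, import_map):
        self._import_map = import_map  # attribute name -> providing module
    def __getattr__(self, name):
        if name in self._import_map:
            module = importlib.import_module(self._import_map[name])
            return getattr(module, name)  # import deferred until first access
        raise AttributeError(name)
_demo = _MiniLazyModule({"sqrt": "math"})
assert _demo.sqrt(9) == 3.0  # "math" is only imported on this first access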
| 149 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__SCREAMING_SNAKE_CASE : List[str] = '\\n Text data.\n Second line of data.'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'file'
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with zstd.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
snake_case_ = input_paths[compression_format]
snake_case_ = tmp_path / """cache"""
snake_case_ = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
snake_case_ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = """custom_cache"""
snake_case_ = """custom_extracted_dir"""
snake_case_ = tmp_path / """custom_extracted_path"""
if default_extracted:
snake_case_ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case_ = xz_file
snake_case_ = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
snake_case_ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
# absolute path
snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).resolve() )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
# relative path
snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
# absolute path
snake_case_ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
# relative path
snake_case_ = """./__missing_file__.txt"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( ) -> List[Any]:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get("""https://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get("""ftp://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get("""s3://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head("""s3://huggingface.co""" )
| 233 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length = 8 ) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl , i ) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    password_list = list(chars )
    shuffle(password_list )
    return "".join(password_list )
# random is a generalised function for letters, characters and numbers
def random( chars_incl , quantity ) -> str:
    return "".join(secrets.choice(chars_incl ) for _ in range(quantity ) )
def random_number( chars_incl , quantity ):
    pass # Put your code here...
def random_letters( chars_incl , quantity ):
    pass # Put your code here...
def random_characters( chars_incl , quantity ):
    pass # Put your code here...
def is_strong_password( password , min_length = 8 ) -> bool:
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    max_length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(max_length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , max_length ) , )
    print("""[If you are thinking of using this password, you had better save it.]""" )
if __name__ == "__main__":
main()
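    # --- Added sketch (hedged): a compact, non-interactive version of the
    # generate-then-verify flow above, using only names already imported here.
    alphabet = ascii_uppercase + ascii_lowercase + digits + punctuation
    candidate = "".join(secrets.choice(alphabet) for _ in range(16))
    print(candidate, "strong" if is_strong_password(candidate) else "weak")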
| 233 | 1 |
import copy
import random
from transformers import CLIPTokenizer
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
A__ = {}
def UpperCamelCase ( self , lowercase , *lowercase , **lowercase ) -> str:
'''simple docstring'''
A__ = super().add_tokens(lowercase , *lowercase , **lowercase )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
" `placeholder_token` that is not already in the tokenizer." )
def UpperCamelCase ( self , lowercase , *lowercase , lowercase=1 , **lowercase ) -> Any:
'''simple docstring'''
A__ = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
else:
A__ = []
for i in range(lowercase ):
A__ = placeholder_token + F'_{i}'
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}; keep placeholder tokens independent' )
A__ = output
def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=1.0 ) -> List[Any]:
'''simple docstring'''
if isinstance(lowercase , lowercase ):
A__ = []
for i in range(len(lowercase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A__ = self.token_map[placeholder_token]
A__ = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )]
if vector_shuffle:
A__ = copy.copy(lowercase )
random.shuffle(lowercase )
A__ = text.replace(lowercase , " ".join(lowercase ) )
return text
def __call__( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> str:
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
def UpperCamelCase ( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> List[str]:
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
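# --- Added sketch (hedged): the placeholder expansion implemented above, without the
# CLIP dependency. One placeholder fans out to its per-vector sub-tokens before ordinary
# tokenization; the token names are illustrative.
def _demo_placeholder_expansion() -> None:
    token_map = {"<cat>": ["<cat>_0", "<cat>_1", "<cat>_2"]}
    text = "a photo of <cat>"
    for placeholder, vectors in token_map.items():
        if placeholder in text:
            text = text.replace(placeholder, " ".join(vectors))
    assert text == "a photo of <cat>_0 <cat>_1 <cat>_2"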
| 68 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
a_ :Optional[Any] = logging.getLogger(__name__)
def lowercase_ (A : List[Any] , A : List[Any] ):
# save results
if os.path.exists(A ):
if os.path.exists(os.path.join(A , 'config.json' ) ) and os.path.isfile(
os.path.join(A , 'config.json' ) ):
os.remove(os.path.join(A , 'config.json' ) )
if os.path.exists(os.path.join(A , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(A , 'pytorch_model.bin' ) ):
os.remove(os.path.join(A , 'pytorch_model.bin' ) )
else:
os.makedirs(A )
model.save_pretrained(A )
def lowercase_ (A : Any , A : Optional[Any]=False ):
snake_case__ : str = 2
if unlogit:
snake_case__ : Dict = torch.pow(A , A )
snake_case__ : Any = p * torch.log(A )
snake_case__ : Tuple = 0
return -plogp.sum(dim=-1 )
def lowercase_ (A : List[str] ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(A ) ) ) )
for row in range(len(A ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowercase_ (A : Tuple , A : Optional[Any] , A : str , A : int=True , A : Optional[int]=True , A : Any=None , A : int=False ):
snake_case__ , snake_case__ : Optional[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
snake_case__ : int = torch.zeros(A , A ).to(args.device )
snake_case__ : Any = torch.zeros(A , A ).to(args.device )
if head_mask is None:
snake_case__ : Dict = torch.ones(A , A ).to(args.device )
head_mask.requires_grad_(requires_grad=A )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
snake_case__ : Optional[int] = None
snake_case__ : List[Any] = 0.0
snake_case__ : str = 0.0
for step, inputs in enumerate(tqdm(A , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
snake_case__ : Union[str, Any] = tuple(t.to(args.device ) for t in inputs )
((snake_case__) , ) : Optional[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
snake_case__ : Union[str, Any] = model(A , labels=A , head_mask=A )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
snake_case__ , snake_case__ , snake_case__ : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A ):
snake_case__ : Optional[Any] = entropy(attn.detach() , A )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
snake_case__ : Union[str, Any] = 2
snake_case__ : List[Any] = torch.pow(torch.pow(A , A ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
snake_case__ : Tuple = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(A )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(A )
logger.info('Head ranked by importance scores' )
snake_case__ : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
snake_case__ : Union[str, Any] = torch.arange(
head_importance.numel() , device=args.device )
snake_case__ : str = head_ranks.view_as(A )
print_ad_tensor(A )
return attn_entropy, head_importance, total_loss
def lowercase_ (A : Optional[int] , A : Dict , A : Optional[int] ):
snake_case__ , snake_case__ , snake_case__ : Any = compute_heads_importance(A , A , A , compute_entropy=A )
snake_case__ : Tuple = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , A , original_score * args.masking_threshold )
snake_case__ : Optional[Any] = torch.ones_like(A )
snake_case__ : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
snake_case__ : Dict = original_score
while current_score >= original_score * args.masking_threshold:
snake_case__ : int = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
snake_case__ : List[Any] = float('Inf' )
snake_case__ : Union[str, Any] = head_importance.view(-1 ).sort()[1]
if len(A ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
snake_case__ : int = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
snake_case__ : int = new_head_mask.view(-1 )
snake_case__ : int = 0.0
snake_case__ : Union[str, Any] = new_head_mask.view_as(A )
snake_case__ : List[str] = new_head_mask.clone().detach()
print_ad_tensor(A )
# Compute metric and head importance again
snake_case__ , snake_case__ , snake_case__ : Any = compute_heads_importance(
A , A , A , compute_entropy=A , head_mask=A )
snake_case__ : Dict = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(A )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowercase_ (A : List[str] , A : Tuple , A : Optional[Any] , A : int ):
snake_case__ : Any = datetime.now()
snake_case__ , snake_case__ , snake_case__ : str = compute_heads_importance(
A , A , A , compute_entropy=A , compute_importance=A , head_mask=A )
snake_case__ : Tuple = 1 / loss
snake_case__ : Dict = datetime.now() - before_time
snake_case__ : Union[str, Any] = sum(p.numel() for p in model.parameters() )
snake_case__ : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A ) )
}
for k, v in heads_to_prune.items():
if isinstance(A , A ):
snake_case__ : Any = [
v,
]
assert sum(len(A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A )
snake_case__ : Dict = sum(p.numel() for p in model.parameters() )
snake_case__ : Tuple = datetime.now()
snake_case__ , snake_case__ , snake_case__ : Dict = compute_heads_importance(
A , A , A , compute_entropy=A , compute_importance=A , head_mask=A , actually_pruned=A , )
snake_case__ : Any = 1 / loss
snake_case__ : int = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , A , A , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , A , A )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(A , args.output_dir )
def lowercase_ ():
snake_case__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=A , type=A , required=A , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=A , type=A , required=A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=A , type=A , required=A , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=A , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=A , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=A , type=A , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=A , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=A , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=A , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=A , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=A , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=A , help='Batch size.' )
parser.add_argument('--seed' , type=A , default=4_2 )
parser.add_argument('--local_rank' , type=A , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=A , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=A , default='' , help='Can be used for distant debugging.' )
snake_case__ : Optional[int] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
snake_case__ : List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
snake_case__ : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
snake_case__ : int = torch.device('cuda' , args.local_rank )
snake_case__ : List[str] = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    snake_case__ : Any = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
snake_case__ : List[str] = nn.parallel.DistributedDataParallel(
A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A )
elif args.n_gpu > 1:
snake_case__ : Optional[int] = nn.DataParallel(A )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A )
torch.save(A , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , A )
# Prepare dataset
snake_case__ : Optional[Any] = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
snake_case__ : List[str] = (torch.from_numpy(A ),)
snake_case__ : int = TensorDataset(*A )
snake_case__ : Union[str, Any] = RandomSampler(A )
snake_case__ : Any = DataLoader(A , sampler=A , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A , A , A )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
snake_case__ : Dict = mask_heads(A , A , A )
prune_heads(A , A , A , A )
if __name__ == "__main__":
main()
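    # --- Added sketch (hedged): the attention-entropy helper used above (the
    # unlogit=False path), checked on a toy distribution: a uniform p over n options
    # should give entropy log(n). Names are illustrative.
    import math
    def _attn_entropy_demo(p):
        plogp = p * torch.log(p)
        plogp[p == 0] = 0  # define 0 * log(0) := 0
        return -plogp.sum(dim=-1)
    uniform = torch.full((4,), 0.25)
    assert torch.isclose(_attn_entropy_demo(uniform), torch.tensor(math.log(4)))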
| 277 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 100 , rate_of_decrease : float = 0.01 , threshold_temp : float = 1 , ):
    '''simple docstring'''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ): # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 ) # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue # neighbor outside our bounds
            if not find_max:
                change = change * -1 # in case we are finding minimum
            if change > 0: # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                ) # probability generation function
                if random.random() < probability: # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ), scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa( x , y ):
        '''simple docstring'''
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f'and 50 > y > - 5 found via simulated annealing: {local_max.score()}'
    )
    def test_fa( x , y ):
        '''simple docstring'''
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        f'{local_min.score()}'
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        f'{local_max.score()}'
)
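    # --- Added sketch (hedged): the Metropolis-style acceptance rule used in
    # simulated_annealing above. A move that worsens the score (change < 0) is still
    # taken with probability e ** (change / temperature), so bad moves stay likely
    # while the temperature is high. The helper name is illustrative.
    def acceptance_probability(change: float, temperature: float) -> float:
        return 1.0 if change > 0 else math.e ** (change / temperature)
    assert acceptance_probability(5.0, 100.0) == 1.0  # improvements: always taken
    assert acceptance_probability(-5.0, 100.0) > acceptance_probability(-5.0, 1.0)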
| 350 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image : Union[List, PIL.Image.Image, torch.Tensor] ):
    '''simple docstring'''
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''', FutureWarning, )
    if isinstance(image, torch.Tensor ):
        return image
    elif isinstance(image, PIL.Image.Image ):
        image = [image]
    if isinstance(image[0], PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image, axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0, 3, 1, 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0], torch.Tensor ):
        image = torch.cat(image, dim=0 )
    return image
def _preprocess_mask( mask : Union[List, PIL.Image.Image, torch.Tensor] ):
    '''simple docstring'''
    if isinstance(mask, torch.Tensor ):
        return mask
    elif isinstance(mask, PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0], torch.Tensor ):
        mask = torch.cat(mask, dim=0 )
    return mask
class __lowercase ( A ):
'''simple docstring'''
    _A : UNet2DModel
_A : RePaintScheduler
def __init__( self : Tuple , _a : Any , _a : Tuple ):
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self : Optional[int] , _a : Union[torch.Tensor, PIL.Image.Image] , _a : Union[torch.Tensor, PIL.Image.Image] , _a : int = 250 , _a : float = 0.0 , _a : int = 10 , _a : int = 10 , _a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a : Optional[str] = "pil" , _a : bool = True , ):
UpperCamelCase__ = image
UpperCamelCase__ = _preprocess_image(_a )
UpperCamelCase__ = original_image.to(device=self.device , dtype=self.unet.dtype )
UpperCamelCase__ = _preprocess_mask(_a )
UpperCamelCase__ = mask_image.to(device=self.device , dtype=self.unet.dtype )
UpperCamelCase__ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_a )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase__ = original_image.shape
UpperCamelCase__ = randn_tensor(_a , generator=_a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_a , _a , _a , self.device )
UpperCamelCase__ = eta
UpperCamelCase__ = self.scheduler.timesteps[0] + 1
UpperCamelCase__ = generator[0] if isinstance(_a , _a ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
UpperCamelCase__ = self.unet(_a , _a ).sample
# compute previous image: x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(_a , _a , _a , _a , _a , _a ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
UpperCamelCase__ = self.scheduler.undo_step(_a , _a , _a )
UpperCamelCase__ = t
UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
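# --- Added sketch (hedged): the [0, 255] -> [-1, 1] normalization performed in
# _preprocess_image above, restated as a standalone check; the names are illustrative.
def _demo_normalize_to_unit_range() -> None:
    img = np.random.randint(0, 256, size=(1, 64, 64, 3), dtype=np.uint8)
    arr = img.astype(np.float32) / 255.0  # [0, 1]
    arr = arr.transpose(0, 3, 1, 2)       # NHWC -> NCHW
    arr = 2.0 * arr - 1.0                 # [0, 1] -> [-1, 1]
    assert arr.shape == (1, 3, 64, 64)
    assert -1.0 <= float(arr.min()) <= float(arr.max()) <= 1.0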
| 35 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[Any] = "efficientformer"
def __init__( self, SCREAMING_SNAKE_CASE_ = [3, 2, 6, 4], SCREAMING_SNAKE_CASE_ = [48, 96, 224, 448], SCREAMING_SNAKE_CASE_ = [True, True, True, True], SCREAMING_SNAKE_CASE_ = 448, SCREAMING_SNAKE_CASE_ = 32, SCREAMING_SNAKE_CASE_ = 4, SCREAMING_SNAKE_CASE_ = 7, SCREAMING_SNAKE_CASE_ = 5, SCREAMING_SNAKE_CASE_ = 8, SCREAMING_SNAKE_CASE_ = 4, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = 16, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 1e-5, SCREAMING_SNAKE_CASE_ = "gelu", SCREAMING_SNAKE_CASE_ = 0.02, SCREAMING_SNAKE_CASE_ = 1e-12, SCREAMING_SNAKE_CASE_ = 224, SCREAMING_SNAKE_CASE_ = 1e-05, **SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
        super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = hidden_act
UpperCamelCase : List[str] = hidden_dropout_prob
UpperCamelCase : str = hidden_sizes
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Dict = num_attention_heads
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Dict = layer_norm_eps
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : Tuple = depths
UpperCamelCase : str = mlp_expansion_ratio
UpperCamelCase : List[str] = downsamples
UpperCamelCase : int = dim
UpperCamelCase : Any = key_dim
UpperCamelCase : Optional[Any] = attention_ratio
UpperCamelCase : int = resolution
UpperCamelCase : Tuple = pool_size
UpperCamelCase : Optional[int] = downsample_patch_size
UpperCamelCase : List[str] = downsample_stride
UpperCamelCase : str = downsample_pad
UpperCamelCase : Any = drop_path_rate
UpperCamelCase : Tuple = num_metaad_blocks
UpperCamelCase : Dict = distillation
UpperCamelCase : List[str] = use_layer_scale
UpperCamelCase : Dict = layer_scale_init_value
UpperCamelCase : Optional[int] = image_size
UpperCamelCase : Any = batch_norm_eps
| 119 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image , output_size , keep_aspect_ratio , multiple ):
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = False, __a = 1, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Any = size if size is not None else {"height": 384, "width": 384}
_lowerCAmelCase : Optional[int] = get_size_dict(__a)
_lowerCAmelCase : Optional[Any] = do_resize
_lowerCAmelCase : Dict = size
_lowerCAmelCase : Any = keep_aspect_ratio
_lowerCAmelCase : str = ensure_multiple_of
_lowerCAmelCase : str = resample
_lowerCAmelCase : Dict = do_rescale
_lowerCAmelCase : Optional[int] = rescale_factor
_lowerCAmelCase : Dict = do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self, __a, __a, __a = False, __a = 1, __a = PILImageResampling.BICUBIC, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
_lowerCAmelCase : List[Any] = get_resize_output_image_size(
__a, output_size=(size["height"], size["width"]), keep_aspect_ratio=__a, multiple=__a, )
return resize(__a, size=__a, resample=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return rescale(__a, scale=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return normalize(__a, mean=__a, std=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : List[Any] = size if size is not None else self.size
_lowerCAmelCase : str = get_size_dict(__a)
_lowerCAmelCase : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCAmelCase : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCAmelCase : int = resample if resample is not None else self.resample
_lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Optional[Any] = make_list_of_images(__a)
if not valid_images(__a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : List[Any] = [to_numpy_array(__a) for image in images]
if do_resize:
_lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images]
if do_rescale:
_lowerCAmelCase : List[str] = [self.rescale(image=__a, scale=__a) for image in images]
if do_normalize:
_lowerCAmelCase : Dict = [self.normalize(image=__a, mean=__a, std=__a) for image in images]
_lowerCAmelCase : List[str] = [to_channel_dimension_format(__a, __a) for image in images]
_lowerCAmelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__a, tensor_type=__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a) != len(__a):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__a):
_lowerCAmelCase : List[Any] = target_sizes.numpy()
_lowerCAmelCase : Dict = []
for idx in range(len(__a)):
_lowerCAmelCase : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a)
_lowerCAmelCase : int = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__a)
else:
_lowerCAmelCase : Dict = logits.argmax(dim=1)
_lowerCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
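# Hedged usage sketch for the post-processing step above (names illustrative,
# assuming the standard transformers semantic-segmentation flow):
#     inputs = image_processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     seg_maps = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]
#     )  # one (height, width) label map per image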
| 36 | 0 |
import cva
import numpy as np
class a :
'''simple docstring'''
def __init__( self : List[str] , __snake_case : float , __snake_case : int ):
if k in (0.04, 0.06):
UpperCAmelCase_ = k
UpperCAmelCase_ = window_size
else:
            raise ValueError('''invalid k value, must be 0.04 or 0.06''' )
def __str__( self : Tuple ):
return str(self.k )
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : str ):
UpperCAmelCase_ = cva.imread(__snake_case , 0 )
UpperCAmelCase_ , UpperCAmelCase_ = img.shape
UpperCAmelCase_ = []
UpperCAmelCase_ = img.copy()
UpperCAmelCase_ = cva.cvtColor(__snake_case , cva.COLOR_GRAY2RGB )
UpperCAmelCase_ , UpperCAmelCase_ = np.gradient(__snake_case )
UpperCAmelCase_ = dx**2
UpperCAmelCase_ = dy**2
UpperCAmelCase_ = dx * dy
        UpperCAmelCase_ = self.k  # use the configured sensitivity rather than a hard-coded 0.04
UpperCAmelCase_ = self.window_size // 2
for y in range(__snake_case , h - offset ):
for x in range(__snake_case , w - offset ):
UpperCAmelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ = (wxx * wyy) - (wxy**2)
UpperCAmelCase_ = wxx + wyy
UpperCAmelCase_ = det - k * (trace**2)
                # Response threshold; tune per image (larger values keep fewer, stronger corners)
                if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
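# For reference, the response computed above is the Harris measure
#     R = det(M) - k * trace(M)**2
# where M = [[Wxx, Wxy], [Wxy, Wyy]] is the structure tensor summed over the
# window: R > 0 flags a corner, R < 0 an edge, and |R| near 0 a flat patch.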
if __name__ == "__main__":
_lowerCamelCase = HarrisCorner(0.04, 3)
_lowerCamelCase , _lowerCamelCase = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 177 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
    'facebook/data2vec-text-base': 'https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json',
}
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 'data2vec-text'
def __init__( self : Optional[Any] , __snake_case : Optional[int]=3_05_22 , __snake_case : List[str]=7_68 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Union[str, Any]=30_72 , __snake_case : List[Any]="gelu" , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Tuple=5_12 , __snake_case : str=2 , __snake_case : str=0.02 , __snake_case : List[Any]=1E-12 , __snake_case : Any=1 , __snake_case : List[Any]=0 , __snake_case : Dict=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Any=None , **__snake_case : List[Any] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class a ( _A ):
'''simple docstring'''
@property
def lowerCamelCase_ ( self : str ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
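# Hedged usage sketch (illustrative; assumes the legacy transformers.onnx export API):
#     from pathlib import Path
#     from transformers.onnx import export
#     onnx_config = Data2VecTextOnnxConfig(model.config)
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))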
| 177 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "Salesforce/blip-image-captioning-base"
UpperCAmelCase__ : int = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
UpperCAmelCase__ : Optional[int] = "image_captioner"
UpperCAmelCase__ : Tuple = AutoModelForVisionaSeq
UpperCAmelCase__ : Union[str, Any] = ["image"]
UpperCAmelCase__ : Tuple = ["text"]
def __init__( self , *A_ , **A_ ) -> str:
requires_backends(self , ['vision'] )
super().__init__(*A_ , **A_ )
def _a ( self , A_ ) -> Optional[int]:
return self.pre_processor(images=A_ , return_tensors='pt' )
def _a ( self , A_ ) -> Any:
return self.model.generate(**A_ )
def _a ( self , A_ ) -> str:
return self.pre_processor.batch_decode(A_ , skip_special_tokens=A_ )[0].strip()
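# Hedged usage sketch (illustrative; the tool class name above is obfuscated):
#     from PIL import Image
#     tool = ImageCaptioningTool()  # hypothetical deobfuscated name for the class above
#     caption = tool(Image.open("photo.jpg"))  # PipelineTool instances are callable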
| 62 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
    '''facebook/data2vec-audio-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "data2vec-audio"
def __init__( self : Tuple , __lowercase : Optional[int]=32 , __lowercase : List[str]=768 , __lowercase : List[str]=12 , __lowercase : str=12 , __lowercase : Tuple=3072 , __lowercase : Any="gelu" , __lowercase : Dict=0.1 , __lowercase : Any=0.1 , __lowercase : Tuple=0.1 , __lowercase : List[str]=0.0 , __lowercase : List[Any]=0.1 , __lowercase : str=0.1 , __lowercase : Optional[int]=0.0_2 , __lowercase : Dict=1E-5 , __lowercase : Any="gelu" , __lowercase : Dict=(512, 512, 512, 512, 512, 512, 512) , __lowercase : str=(5, 2, 2, 2, 2, 2, 2) , __lowercase : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __lowercase : Dict=False , __lowercase : int=16 , __lowercase : Any=19 , __lowercase : Tuple=5 , __lowercase : Optional[Any]=0.0_5 , __lowercase : Optional[int]=10 , __lowercase : int=2 , __lowercase : Optional[Any]=0.0 , __lowercase : Tuple=10 , __lowercase : Union[str, Any]=0 , __lowercase : Optional[int]="sum" , __lowercase : str=False , __lowercase : Union[str, Any]=False , __lowercase : Any=256 , __lowercase : str=(512, 512, 512, 512, 1500) , __lowercase : Union[str, Any]=(5, 3, 3, 1, 1) , __lowercase : List[Any]=(1, 2, 3, 1, 1) , __lowercase : Any=512 , __lowercase : int=0 , __lowercase : Union[str, Any]=1 , __lowercase : Optional[int]=2 , __lowercase : Any=False , __lowercase : Optional[int]=3 , __lowercase : Optional[Any]=2 , __lowercase : Any=3 , __lowercase : Tuple=None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase )
__lowercase =hidden_size
__lowercase =feat_extract_activation
__lowercase =list(__lowercase )
__lowercase =list(__lowercase )
__lowercase =list(__lowercase )
__lowercase =conv_bias
__lowercase =num_conv_pos_embeddings
__lowercase =num_conv_pos_embedding_groups
__lowercase =conv_pos_kernel_size
__lowercase =len(self.conv_dim )
__lowercase =num_hidden_layers
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =num_attention_heads
__lowercase =hidden_dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =feat_proj_dropout
__lowercase =final_dropout
__lowercase =layerdrop
__lowercase =layer_norm_eps
__lowercase =initializer_range
__lowercase =vocab_size
__lowercase =use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase =mask_time_prob
__lowercase =mask_time_length
__lowercase =mask_time_min_masks
__lowercase =mask_feature_prob
__lowercase =mask_feature_length
__lowercase =mask_feature_min_masks
# ctc loss
__lowercase =ctc_loss_reduction
__lowercase =ctc_zero_infinity
# adapter
__lowercase =add_adapter
__lowercase =adapter_kernel_size
__lowercase =adapter_stride
__lowercase =num_adapter_layers
__lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase =list(__lowercase )
__lowercase =list(__lowercase )
__lowercase =list(__lowercase )
__lowercase =xvector_output_dim
@property
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
return math.prod(self.conv_stride )
| 141 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: Dict = "timm_backbone"
def __init__( self : List[Any] , _A : Dict=None , _A : Optional[int]=3 , _A : Union[str, Any]=True , _A : Dict=True , _A : Optional[int]=None , **_A : Any , ) -> Dict:
"""simple docstring"""
super().__init__(**_A )
snake_case_ : List[str] = backbone
snake_case_ : Optional[Any] = num_channels
snake_case_ : List[Any] = features_only
snake_case_ : Any = use_pretrained_backbone
snake_case_ : str = True
snake_case_ : Tuple = out_indices if out_indices is not None else (-1,)
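# Hedged usage sketch (illustrative):
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     # out_indices defaults to (-1,), i.e. only the last feature stage is returned.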
| 88 |
_SCREAMING_SNAKE_CASE = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 88 | 1 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = name
__lowerCAmelCase: Dict = value
__lowerCAmelCase: Tuple = weight
def __repr__( self : Optional[Any])-> List[str]:
'''simple docstring'''
return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def lowercase_ ( self : List[Any])-> Optional[int]:
'''simple docstring'''
return self.value
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
return self.name
def lowercase_ ( self : int)-> str:
'''simple docstring'''
return self.weight
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
return self.value / self.weight
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
__lowerCAmelCase: int = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
__lowerCAmelCase: int = sorted(__SCREAMING_SNAKE_CASE , key=__SCREAMING_SNAKE_CASE , reverse=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = []
__lowerCAmelCase , __lowerCAmelCase: Tuple = 0.0, 0.0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
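# Note: with value/weight density as the sort key, the greedy loop above is
# optimal for the fractional knapsack but only a heuristic for 0/1 knapsack.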
def a__ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 1.0 ) -> float:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
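# Example: at the mean of a standard normal, gaussian(0.0) = 1 / sqrt(2 * pi) ≈ 0.3989.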
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _a ( lowerCamelCase: List[Any] , lowerCamelCase: bool = True , lowerCamelCase: float = math.inf , lowerCamelCase: float = -math.inf , lowerCamelCase: float = math.inf , lowerCamelCase: float = -math.inf , lowerCamelCase: bool = False , lowerCamelCase: float = 1_00 , lowerCamelCase: float = 0.01 , lowerCamelCase: float = 1 , ):
'''simple docstring'''
__A = False
__A = search_prob
__A = start_temperate
__A = []
__A = 0
__A = None
while not search_end:
__A = current_state.score()
if best_state is None or current_score > best_state.score():
__A = current_state
scores.append(lowerCamelCase )
iterations += 1
__A = None
__A = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__A = random.randint(0 , len(lowerCamelCase ) - 1 ) # picking a random neighbor
__A = neighbors.pop(lowerCamelCase )
__A = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__A = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__A = picked_neighbor
else:
__A = (math.e) ** (
change / current_temp
) # probability generation function
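                # Metropolis criterion: a worsening move (change <= 0 here) is
                # accepted with probability exp(change / T), so exploration
                # shrinks as the temperature decays.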
if random.random() < probability: # random number within probability
__A = picked_neighbor
__A = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__A = True
else:
__A = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCamelCase ) , lowerCamelCase )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def _a ( lowerCamelCase: List[Any] , lowerCamelCase: Optional[int] ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
snake_case__ : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
snake_case__ : Tuple = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
snake_case__ : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
snake_case__ : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def _a ( lowerCamelCase: Tuple , lowerCamelCase: int ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
snake_case__ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case__ : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f'{local_min.score()}'
)
snake_case__ : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case__ : List[str] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f'{local_min.score()}'
)
| 362 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 250 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = []
for old_item in old_list:
lowerCamelCase : str = old_item.replace("in_layers.0" , "norm1" )
lowerCamelCase : int = new_item.replace("in_layers.2" , "conv1" )
lowerCamelCase : Union[str, Any] = new_item.replace("out_layers.0" , "norm2" )
lowerCamelCase : List[str] = new_item.replace("out_layers.3" , "conv2" )
lowerCamelCase : Dict = new_item.replace("emb_layers.1" , "time_emb_proj" )
lowerCamelCase : Optional[int] = new_item.replace("skip_connection" , "conv_shortcut" )
lowerCamelCase : Optional[int] = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
'''simple docstring'''
lowerCamelCase : Dict = []
for old_item in old_list:
lowerCamelCase : List[str] = old_item
lowerCamelCase : Tuple = new_item.replace("norm.weight" , "group_norm.weight" )
lowerCamelCase : int = new_item.replace("norm.bias" , "group_norm.bias" )
lowerCamelCase : Optional[Any] = new_item.replace("proj_out.weight" , "proj_attn.weight" )
lowerCamelCase : Optional[int] = new_item.replace("proj_out.bias" , "proj_attn.bias" )
lowerCamelCase : List[Any] = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
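    # Older LDM checkpoints store attention as one fused qkv tensor; the loop
    # below splits it into equal query/key/value chunks, reshaped per head.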
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCamelCase : List[str] = old_checkpoint[path]
lowerCamelCase : List[str] = old_tensor.shape[0] // 3
lowerCamelCase : Optional[int] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCamelCase : List[Any] = old_tensor.shape[0] // config["num_head_channels"] // 3
lowerCamelCase : Optional[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = old_tensor.split(channels // num_heads , dim=1 )
lowerCamelCase : str = query.reshape(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = key.reshape(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = value.reshape(SCREAMING_SNAKE_CASE_ )
for path in paths:
lowerCamelCase : Any = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCamelCase : List[str] = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
lowerCamelCase : List[str] = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
lowerCamelCase : Union[str, Any] = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCamelCase : Any = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCamelCase : List[Any] = old_checkpoint[path["old"]][:, :, 0]
else:
lowerCamelCase : List[str] = old_checkpoint[path["old"]]
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = {}
lowerCamelCase : List[Any] = checkpoint["time_embed.0.weight"]
lowerCamelCase : List[Any] = checkpoint["time_embed.0.bias"]
lowerCamelCase : List[Any] = checkpoint["time_embed.2.weight"]
lowerCamelCase : Optional[int] = checkpoint["time_embed.2.bias"]
lowerCamelCase : Optional[Any] = checkpoint["input_blocks.0.0.weight"]
lowerCamelCase : List[Any] = checkpoint["input_blocks.0.0.bias"]
lowerCamelCase : List[str] = checkpoint["out.0.weight"]
lowerCamelCase : List[str] = checkpoint["out.0.bias"]
lowerCamelCase : Tuple = checkpoint["out.2.weight"]
lowerCamelCase : Union[str, Any] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowerCamelCase : List[str] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowerCamelCase : Dict = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the middle blocks only
lowerCamelCase : List[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowerCamelCase : Dict = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the output blocks only
lowerCamelCase : List[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowerCamelCase : Optional[Any] = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Optional[Any] = (i - 1) // (config["num_res_blocks"] + 1)
lowerCamelCase : Tuple = (i - 1) % (config["num_res_blocks"] + 1)
lowerCamelCase : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
lowerCamelCase : Any = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
lowerCamelCase : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
lowerCamelCase : Dict = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
lowerCamelCase : Optional[int] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = {"old": f"""input_blocks.{i}.0""", "new": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
lowerCamelCase : Tuple = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : str = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Union[str, Any] = {
"old": f"""input_blocks.{i}.1""",
"new": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCamelCase : int = {
f"""input_blocks.{i}.1.qkv.bias""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase : Dict = middle_blocks[0]
lowerCamelCase : Tuple = middle_blocks[1]
lowerCamelCase : List[str] = middle_blocks[2]
lowerCamelCase : Dict = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Union[str, Any] = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Union[str, Any] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Optional[int] = i // (config["num_res_blocks"] + 1)
lowerCamelCase : Any = i % (config["num_res_blocks"] + 1)
lowerCamelCase : int = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]]
lowerCamelCase : Union[str, Any] = {}
for layer in output_block_layers:
lowerCamelCase , lowerCamelCase : List[Any] = layer.split("." )[0], shave_segments(SCREAMING_SNAKE_CASE_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase : Any = [layer_name]
if len(SCREAMING_SNAKE_CASE_ ) > 1:
lowerCamelCase : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
lowerCamelCase : List[str] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
lowerCamelCase : Any = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Optional[Any] = {"old": f"""output_blocks.{i}.0""", "new": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCamelCase : Union[str, Any] = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowerCamelCase : Optional[int] = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
lowerCamelCase : Any = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(SCREAMING_SNAKE_CASE_ ) == 2:
lowerCamelCase : Dict = []
if len(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Optional[int] = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = {
"old": f"""output_blocks.{i}.1""",
"new": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCamelCase : Optional[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , )
else:
lowerCamelCase : Union[str, Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCamelCase : Optional[int] = ".".join(["output_blocks", str(SCREAMING_SNAKE_CASE_ ), path["old"]] )
lowerCamelCase : Union[str, Any] = ".".join(["up_blocks", str(SCREAMING_SNAKE_CASE_ ), "resnets", str(SCREAMING_SNAKE_CASE_ ), path["new"]] )
lowerCamelCase : str = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_snake_case = parser.parse_args()
_snake_case = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_snake_case = json.loads(f.read())
_snake_case = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_snake_case = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_snake_case = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_snake_case = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_snake_case = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
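# Hedged CLI sketch (script name and paths are illustrative):
#     python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#         --config_file config.json --dump_path ./converted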
| 283 |
def lowercase_( SCREAMING_SNAKE_CASE_ = 4000000 ):
'''simple docstring'''
lowerCamelCase : Any = [0, 1]
lowerCamelCase : Union[str, Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase : Union[str, Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
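# Note: even Fibonacci numbers are exactly every third term (0, 2, 8, 34, ...),
# so the parity filter above could be replaced by stepping j in threes.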
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase ( unittest.TestCase ):
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a ( self ):
snake_case_ = 1
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def a ( self ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def a ( self ):
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def a ( self ):
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def a ( self ):
def extract(*snake_case , **snake_case ):
class lowercase :
def __init__( self ):
snake_case_ = torch.ones([0] )
def a ( self , snake_case ):
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def a ( self ):
snake_case_ = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
snake_case_ = 77
snake_case_ = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ = "A painting of a squirrel eating a burger"
snake_case_ = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=lowerCAmelCase__ , )
snake_case_ = output.images
snake_case_ = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def a ( self ):
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
snake_case_ = 77
snake_case_ = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ = unet.half()
snake_case_ = vae.half()
snake_case_ = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ = "A painting of a squirrel eating a burger"
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type='np' , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ = init_image.resize((760, 504) )
snake_case_ = "BAAI/AltDiffusion"
snake_case_ = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ = "A fantasy landscape, trending on artstation"
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.75 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type='np' , )
snake_case_ = output.images[0]
snake_case_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
snake_case_ = init_image.resize((768, 512) )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
snake_case_ = "BAAI/AltDiffusion"
snake_case_ = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ = "A fantasy landscape, trending on artstation"
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.75 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare the max absolute error here
assert np.abs(expected_image - image ).max() < 1e-2
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
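    # _LazyModule defers the heavy torch imports until an attribute is first
    # accessed, keeping the top-level package import cheap.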
| 200 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any=100 , snake_case_ : List[str]=13 , snake_case_ : str=30 , snake_case_ : int=2 , snake_case_ : int=3 , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : Union[str, Any]=32 , snake_case_ : Tuple=5 , snake_case_ : Optional[int]=4 , snake_case_ : Optional[Any]=37 , snake_case_ : List[Any]="gelu" , snake_case_ : str=0.1 , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=10 , snake_case_ : Union[str, Any]=0.02 , snake_case_ : Optional[Any]=3 , ):
snake_case__ : Any = parent
snake_case__ : List[str] = vocab_size
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[int] = image_size
snake_case__ : int = patch_size
snake_case__ : List[Any] = num_channels
snake_case__ : Optional[Any] = is_training
snake_case__ : Dict = use_labels
snake_case__ : Dict = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : List[Any] = type_sequence_label_size
snake_case__ : str = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Dict = (image_size // patch_size) ** 2
snake_case__ : Dict = num_patches + 1
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowerCamelCase ( self : Any , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : List[str] ):
snake_case__ : Union[str, Any] = FlaxBeitModel(config=snake_case_ )
snake_case__ : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : Dict ):
snake_case__ : str = FlaxBeitForMaskedImageModeling(config=snake_case_ )
snake_case__ : List[str] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Tuple ):
snake_case__ : Dict = self.type_sequence_label_size
snake_case__ : int = FlaxBeitForImageClassification(config=snake_case_ )
snake_case__ : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : str = 1
snake_case__ : List[str] = FlaxBeitForImageClassification(snake_case_ )
snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : int = model(snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowerCamelCase ( self : List[str] ):
snake_case__ : str = FlaxBeitModelTester(self )
snake_case__ : Tuple = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def lowerCamelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[int] ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(snake_case_ )
snake_case__ : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : List[str] ):
snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ : str = self._prepare_for_class(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ : List[Any] , **snake_case_ : Tuple ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest("""JIT Enabled""" ):
snake_case__ : Tuple = model_jitted(**snake_case_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
snake_case__ : Tuple = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
snake_case__ : Union[str, Any] = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
snake_case__ : int = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def __snake_case( ) -> Optional[Any]:
snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self : str ):
snake_case__ : Optional[Any] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=snake_case_ , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
snake_case__ : Any = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
snake_case__ : Dict = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
snake_case__ : List[str] = outputs.logits
# verify the logits
snake_case__ : Dict = (1, 196, 8_192)
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : Any = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
snake_case__ : Optional[Any] = self.default_image_processor
snake_case__ : Union[str, Any] = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""np""" )
# forward pass
snake_case__ : List[Any] = model(**snake_case_ )
snake_case__ : Optional[Any] = outputs.logits
# verify the logits
snake_case__ : str = (1, 1_000)
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : Any = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
snake_case__ : Union[str, Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def lowerCamelCase ( self : Dict ):
snake_case__ : List[str] = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
snake_case__ : Optional[int] = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""np""" )
# forward pass
snake_case__ : Optional[int] = model(**snake_case_ )
snake_case__ : int = outputs.logits
# verify the logits
snake_case__ : int = (1, 21_841)
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : Optional[int] = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
snake_case__ : Optional[int] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 35 |
import os
import pytest
from attr import dataclass
UpperCAmelCase__ : Optional[int] = """us-east-1""" # defaults region
@dataclass
class a__ :
"""simple docstring"""
UpperCAmelCase__ : str
UpperCAmelCase__ : Union[str, Any] ="""arn:aws:iam::558105141721:role/sagemaker_execution_role"""
UpperCAmelCase__ : Tuple ={
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 1_6,
"""per_device_eval_batch_size""": 1_6,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 5_0_0,
"""save_steps""": 5_5_0_0,
}
UpperCAmelCase__ : Optional[int] ={**hyperparameters, """max_steps""": 1_0_0_0}
@property
def _lowercase ( self : List[str] ) ->str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _lowercase ( self : Any ) ->str:
"""simple docstring"""
return f"{self.framework}-transfromers-test"
@property
def _lowercase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def _lowercase ( self : Dict ) ->str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __lowercase ( _A ) -> Tuple:
SCREAMING_SNAKE_CASE : List[str] = SageMakerTestEnvironment(framework=request.cls.framework )
| 245 | 0 |
"""simple docstring"""
from itertools import permutations
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_lowercase : Dict = [7, 11, 13, 17]
for i, test in enumerate(_UpperCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 10 ):
return sum(
int("""""".join(map(_UpperCAmelCase , _UpperCAmelCase ) ) )
for num in permutations(range(_UpperCAmelCase ) )
if is_substring_divisible(_UpperCAmelCase ) )
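# Note: the checks above encode Project Euler 43's property that the 3-digit
# windows d2d3d4 ... d8d9d10 are divisible by 2, 3, 5, 7, 11, 13, 17 in turn;
# brute force over all 10! = 3,628,800 permutations is fast with the early exits.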
if __name__ == "__main__":
print(F'{solution() = }')
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    __UpperCAmelCase = re.sub("""<n>""" , """""" , __UpperCAmelCase )  # remove pegasus newline char (keep the result)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
| 336 | 0 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name: str ) -> VideoMAEConfig:
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs( model_name: str, config: VideoMAEConfig ) -> None:
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def rename_key( name: str ) -> str:
    if "encoder." in name:
        name = name.replace("encoder.", "" )
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self" )
    if "attn" in name:
        name = name.replace("attn", "attention.attention" )
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier" )
    return name
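# Example mapping produced by rename_key (derived from the rules above):
# "blocks.0.attn.proj.weight" -> "videomae.encoder.layer.0.attention.output.dense.weight"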
def convert_state_dict( orig_state_dict: dict, config: VideoMAEConfig ) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith("encoder." ):
            key = key.replace("encoder.", "" )
        if "qkv" in key:
            key_split = key.split("." )
            if key.startswith("decoder.blocks" ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
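# The fused qkv projection stores query/key/value stacked row-wise, so a weight
# of shape (3 * dim, dim) is split as val[:dim] -> query, val[dim : 2 * dim] -> key,
# val[-dim:] -> value before being written into the HF-style state dict above.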
def prepare_video() -> list:
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint( checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub ) -> None:
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False )
    files = torch.load(output, map_location="cpu" )
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt" )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt" )
        inputs["bool_masked_pos"] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case_ = torch.Size([1, 174] )
snake_case_ = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case_ = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case_ = torch.Size([1, 174] )
snake_case_ = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case_ = torch.Size([1, 174] )
snake_case_ = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(F"Model name not supported. Should be one of {model_names}" )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4 )
    else:
        print("Logits:", logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4 )
    print("Logits ok!" )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4 )
        print("Loss ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(model_name, organization="nielsr" )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a : Union[str, Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import math
import unittest
def is_prime( number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
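# Quick illustration of the 6k +/- 1 fact used above: any integer can be written
# as 6k + r with r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} gives a multiple of 2
# and r = 3 a multiple of 3, so a prime > 3 must have r in {1, 5}, i.e. be of
# the form 6k +/- 1, which is why the loop only probes i and i + 2 in steps of 6.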
class __magic_name__ ( unittest.TestCase):
    def test_primes( self ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes( self ):
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
_snake_case = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
_snake_case = {
"RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        # the mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
_snake_case = 8.3144598
def rms_speed_of_molecule( temperature: float , molar_mass: float ) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
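# The function evaluates the Maxwell-Boltzmann result v_rms = sqrt(3 * R * T / M),
# with R in J/(mol*K), T in K and M in kg/mol. Rough check (values rounded):
# rms_speed_of_molecule(300, 0.028) is about 517 m/s for nitrogen at room temperature.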
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of nitrogen (N2) in kg/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Any:
'''simple docstring'''
def decorator(_lowercase : List[str] ):
a : Optional[Any] = getattr(_lowercase , "handle_key" , [] )
handle += [key]
setattr(_lowercase , "handle_key" , _lowercase )
return func
return decorator
def _SCREAMING_SNAKE_CASE ( *_lowercase : List[str] ) ->Any:
'''simple docstring'''
def decorator(_lowercase : str ):
a : Optional[Any] = getattr(_lowercase , "handle_key" , [] )
handle += keys
setattr(_lowercase , "handle_key" , _lowercase )
return func
return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
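# Usage sketch (hypothetical menu class, assuming the helpers above): `register`
# rebuilds the class through the KeyHandler metaclass, so marked methods become
# key callbacks reachable via `handle_input`:
#
# @register
# class Menu:
#     @mark("q")
#     def quit(cls):
#         return "quit"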
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : Any = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase )
a : List[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowercase )
env_command_parser(subparsers=_lowercase )
launch_command_parser(subparsers=_lowercase )
tpu_command_parser(subparsers=_lowercase )
test_command_parser(subparsers=_lowercase )
# Let's go
a : List[Any] = parser.parse_args()
if not hasattr(_lowercase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowercase )
if __name__ == "__main__":
main()
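# Dispatch sketch: running e.g. `accelerate env` parses the subcommand, whose
# registration above attached a `func` default to its subparser, and main()
# forwards the parsed args to it; with no subcommand, the help text is printed
# and the CLI exits with status 1.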
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__UpperCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
__UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
__UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Dict , **lowerCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase : Tuple = text
__lowerCAmelCase : Tuple = kwargs.pop("""text_pair""" , lowerCAmelCase )
__lowerCAmelCase : List[Any] = kwargs.pop("""return_tensors""" , lowerCAmelCase )
__lowerCAmelCase : Optional[int] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(lowerCAmelCase ):
if batch_text_pair is not None:
__lowerCAmelCase : Union[str, Any] = batch_text_pair[idx]
else:
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Any = super().__call__(lowerCAmelCase , lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
__lowerCAmelCase : Dict = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase : int = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase : List[str] = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowerCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowerCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowerCAmelCase )
__lowerCAmelCase : List[Any] = {key: item for key, item in output_data.items() if len(lowerCAmelCase ) != 0}
return BatchEncoding(lowerCAmelCase , tensor_type=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase : str , lowerCAmelCase : str=None ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowerCAmelCase : int = [self.sep_token_id]
__lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
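# Usage sketch for batch_encode_candidates (checkpoint name taken from the maps
# above; candidate lists are padded so input_ids come out shaped
# (num_examples, num_candidates, max_length)):
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
# )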
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Dict = None
if self.use_attention_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : List[str] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Dict = config_and_inputs
__lowerCAmelCase : Any = True
__lowerCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = FlaxBertModelTester(self )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = FlaxBertModel.from_pretrained("""bert-base-cased""" )
__lowerCAmelCase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase )
from ...processing_utils import ProcessorMixin
class TvltProcessor( ProcessorMixin ):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
def __init__( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
super().__init__(image_processor=UpperCamelCase__, feature_extractor=UpperCamelCase__ )
lowerCAmelCase_ = image_processor
lowerCAmelCase_ = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process." )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
    def model_input_names( self ):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
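# Usage sketch (hypothetical inputs): video frames go through the image
# processor, waveforms through the feature extractor, and both land in one dict:
# processor = TvltProcessor(image_processor, feature_extractor)
# inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)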
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1e-5
    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1e-2
        assert abs(result_mean.item() - 0.3284743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1e-2
        assert abs(result_mean.item() - 0.3362038 ) < 1e-3
    def test_trained_betas( self ):
        pass
    def test_add_noise_device( self ):
        pass
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
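# Physics behind the function: the built-in potential of a p-n junction is
# V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2); dividing by the electron volt
# constant expresses k_B * T in eV. Illustrative values (assumed, not from the
# source): builtin_voltage(1e17, 1e17, 1e10) is roughly 0.83 V at T = 300 K.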
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__( self ):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str )
    def __repr__( self ):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
    @property
    def tuple( self ):
        return self.major, self.minor, self.patch
    def _validate_operand( self , other ):
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(f"{other} (type {type(other )}) cannot be compared to version." )
    def __eq__( self , other ):
        try:
            other = self._validate_operand(other )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self , other ):
        other = self._validate_operand(other )
        return self.tuple < other.tuple
    def __hash__( self ):
        return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def from_dict( cls , dic ):
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )
    def _to_yaml_string( self ) -> str:
        return self.version_str
def _str_to_version_tuple( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
    return tuple(int(v ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def _version_tuple_to_str( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn( t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn( t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
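# Shape of the schedule (cosine case): alpha_bar decays smoothly from 1 toward 0,
# so betas = 1 - alpha_bar(t2) / alpha_bar(t1) start tiny and grow toward max_beta;
# e.g. betas_for_alpha_bar(4) returns 4 increasing values capped at 0.999.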
class HeunDiscreteScheduler(SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.00085 , beta_end: float = 0.012 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="cosine" )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="exp" )
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep( self , timestep , schedule_timesteps=None ):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma( self ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None , num_train_timesteps: Optional[int] = None , ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=np.float32 )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.float32 )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(np.float32 )
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # interleave sigmas so every timestep except the endpoints is visited twice
        # (Heun's method needs two model evaluations per step)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith("mps" ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t( self , sigma , log_sigmas ):
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras( self , in_sigmas: torch.FloatTensor , num_inference_steps ) -> torch.FloatTensor:
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order( self ):
        return self.dt is None
    def step( self , model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ):
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : List[str] ):
return self.config.num_train_timesteps
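# Heun's method in brief: the first call to step() takes an Euler step and caches
# derivative, dt and sample; the second call recomputes the derivative at the
# predicted point and averages it with the cached one (the 2nd-order correction)
# before applying the stored dt. Hence `order = 2` and the doubled sigma schedule
# built in set_timesteps.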
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma) -> np.ndarray:
    """Return a (ksize x ksize) real Gabor filter kernel."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
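# Reference note (added): the loop above samples the real-valued Gabor function
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel offsets (px, py) rotated by theta, and ksize
# controls the size of the sampled window.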
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_a = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
_a = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_a = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_a = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_a = out / out.max() * 255
_a = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 194 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 5_1_2,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class UpperCAmelCase (PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
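# Hedged usage sketch (added): the class above is the RetriBERT fast tokenizer.
# Illustrative only -- this module uses relative imports, and the call below
# downloads the hosted vocabulary files.
if __name__ == "__main__":
    tok = UpperCAmelCase.from_pretrained("yjernite/retribert-base-uncased")
    print(tok("hello world")["input_ids"])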
| 369 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def _snake_case ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = MobileBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = self.num_choices
lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
lowercase__: Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
lowercase__: Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def _snake_case ( self ):
lowercase__: int = MobileBertModelTester(self )
lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    @slow
    def _snake_case ( self ):
        model = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
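# Added standalone illustration of the ratio-based tolerance used above: at a
# magnitude of 1e8 an absolute atol of 1e-3 always fails, while the ratio check
# accepts any relative deviation below 0.1%.
if __name__ == "__main__":
    a = torch.tensor([1.0e8])
    b = a * 1.0005  # 0.05% relative deviation
    print(torch.allclose(a, b, rtol=0, atol=1e-3))                     # False
    print(bool(torch.all((a / b >= 1 - 1e-3) & (a / b <= 1 + 1e-3))))  # True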
| 2 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
    model_type = "levit"
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
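# Hedged usage sketch (added): illustrative only, since this module uses
# relative imports and must be run from within the transformers package.
if __name__ == "__main__":
    cfg = LevitConfig()
    print(cfg.model_type, cfg.hidden_sizes)  # levit [128, 256, 384]
    print(dict(LevitOnnxConfig(cfg).inputs))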
| 327 |
def euclidean_gcd(a, b):
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a, b):
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main():
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
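    # Added sanity sketch: iterative and recursive versions agree with math.gcd.
    import math
    assert all(
        euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)
        for a in range(1, 25)
        for b in range(25)
    )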
| 327 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__( self , backbone_config=None , feature_size=256 , mask_feature_size=256 , hidden_dim=256 , encoder_feedforward_dim=1024 , activation_function="relu" , encoder_layers=6 , decoder_layers=10 , num_attention_heads=8 , dropout=0.0 , dim_feedforward=2048 , pre_norm=False , enforce_input_projection=False , common_stride=4 , ignore_value=255 , num_queries=100 , no_object_weight=0.1 , class_weight=2.0 , mask_weight=5.0 , dice_weight=5.0 , train_num_points=12544 , oversample_ratio=3.0 , importance_sample_ratio=0.75 , init_std=0.02 , init_xavier_std=1.0 , use_auxiliary_loss=True , feature_strides=[4, 8, 16, 32] , output_auxiliary_logits=None , **kwargs , ):
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''')
            backbone_config = CONFIG_MAPPING['''swin'''](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict):
            backbone_model_type = backbone_config.pop('''model_type''')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__)
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 344 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein) -> Dict[str, torch.Tensor]:
    '''Construct denser atom positions (14 dimensions instead of 37).'''
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein['''aatype'''].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein['''aatype'''].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein['''aatype'''].device , )
    protein_aatype = protein['''aatype'''].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein['''atom14_atom_exists'''] = residx_atom14_mask
    protein['''residx_atom14_to_atom37'''] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein['''residx_atom37_to_atom14'''] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['''aatype'''].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein['''atom37_atom_exists'''] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch) -> Dict[str, np.ndarray]:
    '''NumPy wrapper around make_atom14_masks.'''
    batch = tree_map(lambda n: torch.tensor(n , device=batch['''aatype'''].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
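# Hedged usage sketch (added): index maps for a toy two-residue (Ala, Gly)
# protein; illustrative only, since this module uses relative imports.
if __name__ == "__main__":
    toy = {"aatype": torch.tensor([rc.restype_order["A"], rc.restype_order["G"]])}
    maps = make_atom14_masks(toy)
    print(maps["residx_atom14_to_atom37"].shape)  # torch.Size([2, 14])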
| 344 | 1 |
"""simple docstring"""
def hubble_parameter(hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 86 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v , l , r , key ):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int] ) -> int:
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element among current subsequence ends
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
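    # Added usage sketch: the LIS here is 2, 5, 7, 8, 10, 13, so length 6.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6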
| 34 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_vision_model"
def __init__( self: Optional[Any], a_: List[Any]=768, a_: Dict=12, a_: List[str]=3, a_: Union[str, Any]=16, a_: List[str]=288, a_: List[Any]=1, a_: List[Any]=1E-05, a_: int=False, a_: Optional[int]=True, a_: Optional[int]=False, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : Optional[int] = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Tuple = num_channels
_snake_case : Tuple = patch_size
_snake_case : Optional[int] = image_size
_snake_case : str = initializer_factor
_snake_case : List[str] = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Optional[int] = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Tuple, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class BridgeTowerTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_text_model"
def __init__( self: Optional[int], a_: Dict=50_265, a_: str=768, a_: int=12, a_: str=12, a_: Tuple=1, a_: Optional[int]=3_072, a_: Any="gelu", a_: Union[str, Any]=0.1, a_: List[Any]=0.1, a_: List[str]=514, a_: List[str]=1, a_: List[Any]=1E-05, a_: Optional[Any]=1, a_: List[Any]=0, a_: Any=2, a_: List[Any]="absolute", a_: Dict=True, **a_: str, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : int = hidden_act
_snake_case : Optional[Any] = initializer_factor
_snake_case : str = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : List[Any] = type_vocab_size
_snake_case : Union[str, Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Any = use_cache
_snake_case : str = pad_token_id
_snake_case : Optional[Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: Optional[int], a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : Any = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Optional[Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class BridgeTowerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower"
def __init__( self: Optional[int], a_: Optional[Any]=True, a_: Optional[int]="gelu", a_: Union[str, Any]=768, a_: Tuple=1, a_: Optional[int]=1E-05, a_: List[str]=False, a_: str="add", a_: str=12, a_: Dict=6, a_: Optional[Any]=False, a_: List[str]=False, a_: str=None, a_: int=None, **a_: Dict, ):
'''simple docstring'''
_snake_case : int = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Any = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : int = share_cross_modal_transformer_layers
_snake_case : int = hidden_act
_snake_case : Tuple = hidden_size
_snake_case : Tuple = initializer_factor
_snake_case : str = layer_norm_eps
_snake_case : Any = share_link_tower_layers
_snake_case : Union[str, Any] = link_tower_type
_snake_case : Dict = num_attention_heads
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Any = tie_word_embeddings
_snake_case : Union[str, Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Dict = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : Dict = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Union[str, Any] = BridgeTowerTextConfig(**a_ )
_snake_case : str = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: int ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = copy.deepcopy(self.__dict__ )
_snake_case : Any = self.text_config.to_dict()
_snake_case : List[Any] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
| 132 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_value , padding_side , sequence_length ):
    """simple docstring"""
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char ):
    """simple docstring"""
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith('''P''' ):
        return True
    return False
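# Added quick check: ASCII and general Unicode punctuation are both detected.
assert is_punctuation("!") and is_punctuation("\u00bf") and not is_punctuation("a")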
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin ):
    '''Dynamically pads the inputs received, as well as the entity-level labels.'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 132 | 1 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class lowercase_ ( nn.Module ):
"""simple docstring"""
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ) -> None:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states )
| 338 |
class Things:
    def __init__( self , name , value , weight ) -> None:
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu(name , value , weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(item , max_cost , key_func ):
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
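    # Added usage sketch: pick items by value under a total weight budget of 60.
    food = ["Burger", "Pizza", "Coca Cola"]
    value = [80, 100, 60]
    weight = [40, 10, 20]
    foods = build_menu(food, value, weight)
    print(greedy(foods, 60.0, Things.get_value))  # picks Pizza and Burger, value 180.0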
| 338 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> None:
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ) -> Tuple:
return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List] ) -> List[Any]:
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path: str ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ) -> None:
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info() -> Dict:
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f: Callable , x: Iterable ) -> List:
    return list(map(f , x ) )
def pickle_save(obj , path ) -> None:
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns , reference_lns ) -> Dict:
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ) -> bool:
    return model_prefix.startswith("""rag""" )
def set_extra_model_params(extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 354 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """Calculate waiting times using the Shortest Remaining Time First policy."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int] , turn_around_time: list[int] , no_of_processes: int ) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
    print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 187 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_table_transformer'''] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 135 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig( PretrainedConfig ):
    model_type = "camembert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
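# --- Illustrative sketch (not part of the original file) ----------------------
# Minimal usage of the two classes above, guarded so it never runs on import:
if __name__ == "__main__":
    config = CamembertConfig(num_hidden_layers=6)  # override any default
    print(config.model_type, config.num_hidden_layers)  # -> camembert 6
    onnx_config = CamembertOnnxConfig(config)
    print(list(onnx_config.inputs))  # -> ['input_ids', 'attention_mask']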
| 135 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main()
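# --- Illustrative sketch (not part of the original file) ----------------------
# Each *_command_parser above attaches a sub-command and sets a `func` default on
# its subparser; that is what makes the `args.func(args)` dispatch in main() work.
# A self-contained miniature of the same pattern (never called automatically):
def _dispatch_demo() -> None:
    demo = ArgumentParser("demo")
    sub = demo.add_subparsers(help="demo sub-commands")
    env = sub.add_parser("env")
    env.set_defaults(func=lambda a: print("env info"))
    args = demo.parse_args(["env"])
    args.func(args)  # prints "env info"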
| 359 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP processor whose image preprocessing uses torchvision transforms, so gradients can flow back to the image."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    """Steers a VQGAN latent toward positive (and away from negative) text prompts via CLIP similarity."""

    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None,
                 clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image",
                 quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5 , lowerCAmelCase_=True ) -> Any:
_A = []
if output_path is None:
_A = """./animation.gif"""
if input_path is None:
_A = self.save_path
_A = sorted(glob(input_path + """/*""" ) )
if not len(lowerCAmelCase_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowerCAmelCase_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_A = total_duration / len(lowerCAmelCase_ )
_A = [frame_duration] * len(lowerCAmelCase_ )
if extend_frames:
_A = 1.5
_A = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowerCAmelCase_ ) )
imageio.mimsave(lowerCAmelCase_ , lowerCAmelCase_ , duration=lowerCAmelCase_ )
print(F'''gif saved to {output_path}''' )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> str:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_A = preprocess(Image.open(lowerCAmelCase_ ) , target_image_size=2_56 ).to(self.device )
_A = preprocess_vqgan(lowerCAmelCase_ )
_A , *_A = self.vqgan.encode(lowerCAmelCase_ )
return z
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_A = self.latent.detach().requires_grad_()
_A = base_latent + transform_vector
if self.quantize:
_A , *_A = self.vqgan.quantize(lowerCAmelCase_ )
else:
_A = trans_latent
return self.vqgan.decode(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_A = self.clip_preprocessor(text=lowerCAmelCase_ , images=lowerCAmelCase_ , return_tensors="""pt""" , padding=lowerCAmelCase_ )
_A = self.clip(**lowerCAmelCase_ )
_A = clip_outputs.logits_per_image
if weights is not None:
_A = similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_A = self._get_clip_similarity(pos_prompts["""prompts"""] , lowerCAmelCase_ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_A = self._get_clip_similarity(neg_prompts["""prompts"""] , lowerCAmelCase_ , weights=neg_prompts["""weights"""] )
else:
_A = torch.tensor([1] , device=self.device )
_A = -torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ )
return loss
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = torch.randn_like(self.latent , requires_grad=lowerCAmelCase_ , device=self.device )
_A = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_A = self._add_vector(lowerCAmelCase_ )
_A = loop_post_process(lowerCAmelCase_ )
_A = self._get_CLIP_loss(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
print("""CLIP loss""" , lowerCAmelCase_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
wandb.init(reinit=lowerCAmelCase_ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_A = Image.open(lowerCAmelCase_ )
_A = image.resize((2_56, 2_56) )
wandb.log("""Original Image""" , wandb.Image(lowerCAmelCase_ ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
if not prompts:
return []
_A = []
_A = []
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowerCAmelCase_ , (tuple, list) ):
_A = prompt[0]
_A = float(prompt[1] )
elif ":" in prompt:
_A , _A = prompt.split(""":""" )
_A = float(lowerCAmelCase_ )
else:
_A = prompt
_A = 1.0
processed_prompts.append(lowerCAmelCase_ )
weights.append(lowerCAmelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase_ , device=self.device ),
}
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , ) -> str:
if image_path:
_A = self._get_latent(lowerCAmelCase_ )
else:
_A = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
_A = self.process_prompts(lowerCAmelCase_ )
_A = self.process_prompts(lowerCAmelCase_ )
if save_final and save_path is None:
_A = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
else:
_A = save_path + """_""" + get_timestamp()
os.makedirs(lowerCAmelCase_ )
_A = save_path
_A = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowerCAmelCase_ ) )
_A = loop_post_process(lowerCAmelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ):
if show_intermediate:
show_pil(lowerCAmelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowerCAmelCase_ )} )
if show_final:
show_pil(lowerCAmelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
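# --- Illustrative sketch (not part of the original file) ----------------------
# Typical driver code for the class above, assuming the remaining (obfuscated)
# method names map back to the upstream project's `generate` API; the VQGAN
# config/checkpoint paths are placeholders, not files shipped with this script.
#
#   editor = VQGAN_CLIP(
#       iterations=20,
#       vqgan_config="./vqgan_config.yaml",          # hypothetical path
#       vqgan_checkpoint="./vqgan_checkpoint.ckpt",  # hypothetical path
#   )
#   editor.generate("a smiling face|bright lighting:0.5", save_intermediate=True)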
| 81 | 0 |
def triangle_number_generator():
    """Yield the triangle numbers t_n = n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count all divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: the first triangle number with more than 500 divisors."""
    return next(t for t in triangle_number_generator() if count_divisors(t) > 500)


if __name__ == "__main__":
    print(solution())
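# --- Illustrative check (not part of the original file) -----------------------
# count_divisors counts ALL divisors: 28 = 2^2 * 7 has (2+1) * (1+1) = 6 of them
# (1, 2, 4, 7, 14, 28), and 28 is also the first triangle number with more than
# five divisors:
#
#   >>> count_divisors(28)
#   6
#   >>> next(t for t in triangle_number_generator() if count_divisors(t) > 5)
#   28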
| 68 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 103 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : str=13 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=True , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Optional[int]=5 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : str=37 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : List[str]=0.9 , __lowerCamelCase : Any=None , ):
A : Optional[int] = parent
A : int = batch_size
A : Any = image_size
A : Optional[Any] = num_channels
A : str = patch_size
A : Any = tubelet_size
A : int = num_frames
A : Optional[Any] = is_training
A : Optional[int] = use_labels
A : Union[str, Any] = hidden_size
A : Optional[Any] = num_hidden_layers
A : str = num_attention_heads
A : List[str] = intermediate_size
A : List[Any] = hidden_act
A : List[str] = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : Dict = type_sequence_label_size
A : Union[str, Any] = initializer_range
A : int = mask_ratio
A : Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
A : Union[str, Any] = (image_size // patch_size) ** 2
A : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
A : str = int(mask_ratio * self.seq_length )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
A : Tuple = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ):
A : Optional[int] = VideoMAEModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ):
A : int = VideoMAEForPreTraining(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A : int = torch.ones((self.num_masks,) )
A : int = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
A : List[str] = mask.expand(self.batch_size , -1 ).bool()
A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
# model only returns predictions for masked patches
A : List[Any] = mask.sum().item()
A : List[str] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
A : List[Any] = self.prepare_config_and_inputs()
A : Optional[int] = config_and_inputs
A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
a__ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a__ = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
A : List[str] = VideoMAEModelTester(self )
A : Dict = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=False ):
A : List[str] = copy.deepcopy(__lowerCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A : Union[str, Any] = torch.ones((self.model_tester.num_masks,) )
A : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
A : Tuple = mask.expand(self.model_tester.batch_size , -1 ).bool()
A : Union[str, Any] = bool_masked_pos.to(__lowerCamelCase )
if return_labels:
if model_class in [
*get_values(__lowerCamelCase ),
]:
A : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Dict = [*signature.parameters.keys()]
A : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : str = VideoMAEModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
if not self.has_attentions:
pass
else:
A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = True
for model_class in self.all_model_classes:
A : str = self.model_tester.seq_length - self.model_tester.num_masks
A : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
A : Union[str, Any] = True
A : Any = False
A : Dict = True
A : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[str] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : Optional[int] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A : Tuple = True
A : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : Tuple = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A : Dict = len(__lowerCamelCase )
# Check attention is always last and order is fine
A : Union[str, Any] = True
A : List[Any] = True
A : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : Union[str, Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
A : Dict = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
def check_hidden_states_output(__lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : int = outputs.hidden_states
A : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
A : Dict = self.model_tester.seq_length - self.model_tester.num_masks
A : str = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[str] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
A : Optional[Any] = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
__lowerCamelCase )
A : List[str] = self.default_image_processor
A : str = prepare_video()
A : Any = image_processor(__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Any = model(**__lowerCamelCase )
# verify the logits
A : Any = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : int = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
A : Optional[Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(__lowerCamelCase )
A : Optional[int] = self.default_image_processor
A : List[Any] = prepare_video()
A : int = image_processor(__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# add boolean mask, indicating which patches to mask
A : List[str] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
A : str = torch.load(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Any = model(**__lowerCamelCase )
# verify the logits
A : Dict = torch.Size([1, 14_08, 15_36] )
A : List[Any] = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=__lowerCamelCase )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A : int = torch.tensor([0.5142] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A : Any = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=__lowerCamelCase ).to(
__lowerCamelCase )
with torch.no_grad():
A : int = model(**__lowerCamelCase )
A : Tuple = torch.tensor(torch.tensor([0.6469] ) , device=__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCamelCase , atol=1e-4 ) )
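# --- Illustrative sketch (not part of the original file) ----------------------
# The integration test above, distilled to plain inference (requires network
# access to the MCG-NJU checkpoint on the Hugging Face Hub):
#
#   processor = VideoMAEImageProcessor(image_mean=[0.5] * 3, image_std=[0.5] * 3)
#   model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
#   inputs = processor(prepare_video(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 400): the Kinetics-400 classes
#   print(model.config.id2label[int(logits.argmax(-1))])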
| 351 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound c = sqrt(K / rho) for a fluid of bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
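# --- Illustrative check (not part of the original file) -----------------------
# For water at roughly 20 C (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa) the
# formula gives about 1.47 km/s:
#
#   >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 0)
#   1468.0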
| 256 | 0 |
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
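# --- Illustrative counterpart (not part of the original file) ------------------
# A minimal sender this client could talk to, assuming the same host/port and an
# arbitrary payload file; the client above writes whatever bytes it receives into
# "Received_file". Closing the connection signals end-of-file to the client.
def demo_server(file_name: str = "payload.bin", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(file_name, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()  # EOF tells the client the transfer is done
    server.close()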
| 96 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class snake_case_(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : Tuple , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_5_6}
lowerCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase : Dict = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowerCAmelCase : Any = do_resize
lowerCAmelCase : Union[str, Any] = size
lowerCAmelCase : List[str] = do_center_crop
lowerCAmelCase : int = crop_size
lowerCAmelCase : Dict = resample
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Any = rescale_factor
lowerCAmelCase : List[Any] = offset
lowerCAmelCase : Tuple = do_normalize
lowerCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" in size:
lowerCAmelCase : List[str] = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
lowerCAmelCase : Any = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Union[str, Any] , ):
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : List[str] = image.astype(np.floataa )
if offset:
lowerCAmelCase : Union[str, Any] = image - (scale / 2)
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = to_numpy_array(UpperCamelCase_ )
if do_resize:
lowerCAmelCase : Optional[int] = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ )
if do_center_crop:
lowerCAmelCase : List[str] = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ )
if do_rescale:
lowerCAmelCase : str = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ )
if do_normalize:
lowerCAmelCase : Optional[int] = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ )
lowerCAmelCase : str = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ )
return image
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : List[str] , ):
lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Any = resample if resample is not None else self.resample
lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : str = offset if offset is not None else self.offset
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Any = image_std if image_std is not None else self.image_std
lowerCAmelCase : List[str] = size if size is not None else self.size
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Any = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCAmelCase : List[str] = make_batched(UpperCamelCase_ )
lowerCAmelCase : Dict = [
[
self._preprocess_image(
image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , )
for img in video
]
for video in videos
]
lowerCAmelCase : Optional[Any] = {'''pixel_values''': videos}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
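# --- Illustrative sketch (not part of the original file) ----------------------
# Expected end-to-end behaviour, assuming the (obfuscated) method names above map
# back to the standard resize/center_crop/rescale/normalize/preprocess API of a
# video image processor:
#
#   processor = snake_case_(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
#   video = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # -> (1, 8, 3, 224, 224)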
| 60 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0,
                 **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
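# --- Illustrative sketch (not part of the original file) ----------------------
#   config = TimesformerConfig(num_frames=16, image_size=448)
#   print(config.model_type, config.num_frames, config.attention_type)
#   # -> timesformer 16 divided_space_time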
| 366 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    # (the empty list is created above; keys are appended per encoder layer below)
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
    im = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
a__ = ViTConfig(image_size=3_8_4 , qkv_bias=__lowerCAmelCase )
a__ = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
a__ = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
a__ = 1_0_2_4
a__ = 4_0_9_6
a__ = 2_4
a__ = 1_6
a__ = 1_0_2_4
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
a__ = False
a__ = 'relu'
a__ = 1_0_2_4
a__ = True
a__ = False
a__ = False
# load HuggingFace model
a__ = ViTModel(__lowerCAmelCase , add_pooling_layer=__lowerCAmelCase )
a__ = TrOCRForCausalLM(__lowerCAmelCase )
a__ = VisionEncoderDecoderModel(encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
a__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='cpu' , check_hash=__lowerCAmelCase )['model']
a__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('decoder' ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
# load state dict
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image
a__ = ViTImageProcessor(size=encoder_config.image_size )
a__ = RobertaTokenizer.from_pretrained('roberta-large' )
a__ = TrOCRProcessor(__lowerCAmelCase , __lowerCAmelCase )
a__ = processor(images=prepare_img(__lowerCAmelCase ) , return_tensors='pt' ).pixel_values
# verify logits
a__ = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
a__ = model(pixel_values=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase )
a__ = outputs.logits
a__ = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
a__ = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
a__ = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
a__ = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
a__ = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , __lowerCAmelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
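# --- Illustrative usage (not part of the original file) ------------------------
# Command-line invocation (the script file name is assumed):
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten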
| 109 | 0 |
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Minimum-cost path from the top-left to the bottom-right corner, moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
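# --- Illustrative check (not part of the original file) -----------------------
# The classic 3x3 grid: the cheapest route 1 -> 3 -> 1 -> 1 -> 1 costs 7.
#
#   >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#   7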
| 35 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer: jieba-based pre-tokenization followed by WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
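# --- Illustrative sketch (not part of the original file) ----------------------
# Requires network access to the checkpoint; jieba pre-tokenization runs before
# WordPiece:
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer("今天天气非常好。")["input_ids"])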
| 35 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0,
                 dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: FrozenDict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple:
    """Split a matrix into four equal quadrants (even-sized square matrices only)."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))
def __magic_name__( lowerCamelCase, lowerCamelCase):
if matrix_dimensions(lowerCamelCase) == (2, 2):
return default_matrix_multiplication(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase)
__lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase)
# construct the new matrix from our 4 quadrants
__lowerCAmelCase = []
for i in range(len(lowerCamelCase)):
new_matrix.append(top_left[i] + top_right[i])
for i in range(len(lowerCamelCase)):
new_matrix.append(bot_left[i] + bot_right[i])
return new_matrix
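
# Why seven products (added note): naive block multiplication of the four quadrants needs
# 8 recursive multiplications; the combinations t1..t7 above need only 7, giving the
# recurrence T(n) = 7 T(n/2) + O(n^2) and hence T(n) = O(n^log2(7)) ~= O(n^2.807),
# versus O(n^3) for the schoolbook algorithm.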
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 9 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main() -> Dict:
    """Fine-tune a token classification model (NER, POS, ...) on CoNLL-style data."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results
def _mp_fn(index: int) -> None:
    """Entry point for xla_spawn (TPUs)."""
    main()


if __name__ == "__main__":
    main()
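
# Typical invocation sketch (added; the flag names mirror the dataclasses above, while the
# dataset path and model id are hypothetical):
#
#   python run_ner.py \
#     --task_type NER \
#     --data_dir ./data/conll2003 \
#     --labels ./data/conll2003/labels.txt \
#     --model_name_or_path bert-base-multilingual-cased \
#     --output_dir ./ner-model \
#     --max_seq_length 128 \
#     --do_train --do_eval --do_predict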
| 205 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
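    # Illustrative calls (added; the inputs are hypothetical, not part of the original module):
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString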
| 205 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
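
# Background (added note): zipfile.is_zipfile only looks for the end-of-central-directory
# signature b"PK\x05\x06", which the PNG payload above happens to embed, whereas the
# extractor requires the file to *start* with a ZIP magic number such as b"PK\x03\x04",
# hence its stricter (and here correct) verdict.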
| 357 |
"""simple docstring"""
def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
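    # Note (added): 28123 is the classical bound for this problem. Every integer
    # greater than it is known to be the sum of two abundant numbers, so only
    # n <= 28123 can contribute to the answer.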
| 163 | 0 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
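    # Worked example (added): with charge1 = charge2 = 1 C at distance 1 m, the force
    # equals Coulomb's constant itself, F = k * q1 * q2 / r^2 = 8.988e9 N.
    print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))  # {'force': 8988000000.0}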
| 42 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase : Any = None
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : List[str] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase : Any = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
lowercase__ = extra_ids
@staticmethod
def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , )
return max_model_length
def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase__ = os.path.join(
UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase__ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
| 2 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE ={
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Naive exponential recursion over all ordered ways to reach `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Top-down memoised recursion."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Bottom-up tabulation: dp_array[i] counts the ordered ways to reach sum i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
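    # Trace of the bottom-up table for array=[1, 2, 5], target=5 (added):
    # dp = [1, 1, 2, 3, 5, 9]; dp[5] = dp[4] + dp[3] + dp[0] = 5 + 3 + 1 = 9,
    # i.e. nine ordered sequences of 1s, 2s and 5s sum to 5.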
| 321 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SCREAMING_SNAKE_CASE__ ( __A ):
__SCREAMING_SNAKE_CASE = ["melgan"]
def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,):
super().__init__()
# From MELGAN
A__ = math.log(1E-5 ) # Matches MelGAN training.
A__ = 4.0 # Largest value for most examples
A__ = 128
self.register_modules(
notes_encoder=lowercase_,continuous_encoder=lowercase_,decoder=lowercase_,scheduler=lowercase_,melgan=lowercase_,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=(-1.0, 1.0),__lowerCamelCase=False ):
A__ = output_range
if clip:
A__ = torch.clip(lowercase_,self.min_value,self.max_value )
# Scale to [0, 1].
A__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=(-1.0, 1.0),__lowerCamelCase=False ):
A__ = input_range
A__ = torch.clip(lowercase_,lowercase_,lowercase_ ) if clip else outputs
# Scale to [0, 1].
A__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = input_tokens > 0
A__ = self.notes_encoder(
encoder_input_tokens=lowercase_,encoder_inputs_mask=lowercase_ )
A__ = self.continuous_encoder(
encoder_inputs=lowercase_,encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = noise_time
if not torch.is_tensor(lowercase_ ):
A__ = torch.tensor([timesteps],dtype=torch.long,device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
A__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A__ = timesteps * torch.ones(input_tokens.shape[0],dtype=timesteps.dtype,device=timesteps.device )
A__ = self.decoder(
encodings_and_masks=lowercase_,decoder_input_tokens=lowercase_,decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = 100,__lowerCamelCase = True,__lowerCamelCase = "numpy",__lowerCamelCase = None,__lowerCamelCase = 1,):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_,lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(lowercase_ )}." )
A__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims],dtype=np.floataa )
A__ = np.zeros([1, 0, self.n_dims],np.floataa )
A__ = torch.ones((1, TARGET_FEATURE_LENGTH),dtype=lowercase_,device=self.device )
for i, encoder_input_tokens in enumerate(lowercase_ ):
if i == 0:
A__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device,dtype=self.decoder.dtype )
# The first chunk has no previous context.
A__ = torch.zeros((1, TARGET_FEATURE_LENGTH),dtype=lowercase_,device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
A__ = ones
A__ = self.scale_features(
lowercase_,output_range=[-1.0, 1.0],clip=lowercase_ )
A__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ),continuous_inputs=lowercase_,continuous_mask=lowercase_,)
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
A__ = randn_tensor(
shape=encoder_continuous_inputs.shape,generator=lowercase_,device=self.device,dtype=self.decoder.dtype,)
# set step values
self.scheduler.set_timesteps(lowercase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
A__ = self.decode(
encodings_and_masks=lowercase_,input_tokens=lowercase_,noise_time=t / self.scheduler.config.num_train_timesteps,)
# Compute previous output: x_t -> x_t-1
A__ = self.scheduler.step(lowercase_,lowercase_,lowercase_,generator=lowercase_ ).prev_sample
A__ = self.scale_to_features(lowercase_,input_range=[-1.0, 1.0] )
A__ = mel[:1]
A__ = mel.cpu().float().numpy()
A__ = np.concatenate([full_pred_mel, pred_mel[:1]],axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_,lowercase_ )
logger.info('''Generated segment''',lowercase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
A__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
A__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase_ )
| 193 |
def perfect_cube(n: int) -> bool:
    # round the floating-point cube root so 27 ** (1/3) == 3.0000000000000004
    # does not make perfect_cube(27) return False
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
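
# Exact-integer alternative (added for illustration, not part of the original file):
# binary-search the integer cube root so no floating-point rounding is involved at all.
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # (-x)**3 == -(x**3), so the sign does not affect cube-ness
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        if mid**3 == n:
            return True
        if mid**3 < n:
            low = mid + 1
        else:
            high = mid - 1
    return False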
| 87 | 0 |
"""simple docstring"""
def solution() -> str:
    """Return the last ten digits of 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
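
# Memory-light variant (added for illustration): keep only the last `digits` digits
# throughout via modular exponentiation instead of full big-integer powers.
def solution_mod(digits: int = 10) -> str:
    mod = 10**digits
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(digits)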
| 181 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
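
# Illustrative behaviour (added, not part of the original file):
# next_prime(14) walks 14 -> 15 -> 16 -> 17 and returns 17, while next_prime(5)
# finds 5 already prime and therefore recurses to return the *next* prime, 7.
if __name__ == "__main__":
    print(next_prime(14))  # 17
    print(next_prime(5))  # 7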
| 181 | 1 |
'''simple docstring'''
import random


class Onepad:
    """simple docstring"""

    @staticmethod
    def encrypt(text: str):
        """Encrypt text to a (cipher, key) pair using pseudo-random numbers."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        """Decrypt a (cipher, key) pair back to text."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    cipher, key = Onepad().encrypt("Hello")
    print(cipher, key)
    print(Onepad().decrypt(cipher, key))
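    # Why decryption works (added note): encryption maps each code point p to
    # c = (p + k) * k = p*k + k**2, so (c - k**2) / k == p recovers the plaintext
    # exactly for every key k >= 1.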
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 346 | 0 |
"""simple docstring"""
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
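    # Worked example (added): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so "12345678Z" validates while any other check letter fails.
    print(is_spain_national_id("12345678Z"))  # True
    print(is_spain_national_id("12345678A"))  # False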
| 341 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of the whitespace-stripped content of a code example."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Compute mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check whether the example's hash is still unique; consume it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the first few lines mention that the file is auto-generated."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically detect configuration files and test files."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file has none of the keywords for function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with the heuristics computed in `preprocess`."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the uncompressed original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 327 |
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
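    # Worked example (added): a 90-degree arc of a radius-10 circle is a quarter
    # circumference, 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.7079.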
| 327 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "pix2struct_text_model"
UpperCAmelCase_ = ["past_key_values"]
UpperCAmelCase_ = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Tuple, _UpperCAmelCase : int=5_0_2_4_4, _UpperCAmelCase : str=7_6_8, _UpperCAmelCase : Any=6_4, _UpperCAmelCase : Tuple=2_0_4_8, _UpperCAmelCase : List[Any]=1_2, _UpperCAmelCase : List[Any]=1_2, _UpperCAmelCase : Dict=3_2, _UpperCAmelCase : Dict=1_2_8, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=1E-6, _UpperCAmelCase : Optional[int]=1.0, _UpperCAmelCase : int="gelu_new", _UpperCAmelCase : str=0, _UpperCAmelCase : Tuple=False, _UpperCAmelCase : Dict=0, _UpperCAmelCase : List[str]=1, _UpperCAmelCase : Tuple=False, _UpperCAmelCase : List[Any]=True, **_UpperCAmelCase : Dict, ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = d_kv
SCREAMING_SNAKE_CASE__ : Tuple = d_ff
SCREAMING_SNAKE_CASE__ : Optional[int] = num_layers
SCREAMING_SNAKE_CASE__ : Dict = num_heads
SCREAMING_SNAKE_CASE__ : Dict = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : Optional[int] = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : Optional[Any] = dropout_rate
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : Any = use_cache
SCREAMING_SNAKE_CASE__ : Dict = eos_token_id
SCREAMING_SNAKE_CASE__ : Any = decoder_start_token_id
# for backwards compatibility
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dense_act_fn
super().__init__(
pad_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, decoder_start_token_id=_UpperCAmelCase, tie_word_embeddings=_UpperCAmelCase, is_decoder=_UpperCAmelCase, **_UpperCAmelCase, )
@classmethod
def A_ ( cls : Optional[Any], _UpperCAmelCase : Union[str, os.PathLike], **_UpperCAmelCase : Optional[Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase, **_UpperCAmelCase )
class Pix2StructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (image encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64,
        num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new",
        layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0,
        initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32, relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        # get_config_dict returns a (config_dict, remaining_kwargs) pair, so unpack both.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
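

# A small round-trip sketch (illustrative, not part of the original module):
# a vision config serialized with to_dict() can be rebuilt with from_dict(),
# which is the same path from_pretrained() takes after it fetches a config.json.
def _demo_vision_config_roundtrip() -> None:
    config = Pix2StructVisionConfig(num_hidden_layers=4)
    restored = Pix2StructVisionConfig.from_dict(config.to_dict())
    assert restored.num_hidden_layers == 4
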
class Pix2StructConfig(PretrainedConfig):
    """Composite configuration holding a Pix2Struct text and vision sub-config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, initializer_factor=1.0,
        initializer_range=0.02, is_vqa=False, tie_word_embeddings=False,
        is_encoder_decoder=True, **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # propagate the shared initializer_range into both sub-configs
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ) -> "Pix2StructConfig":
        # Instantiate the composite config from the two sub-model configs.
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """Serialize this instance to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
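

# A composition sketch (illustrative, not part of the original module): build the
# composite config from the two sub-configs and confirm that the shared
# initializer_range is pushed down into both of them by __init__ above.
def _demo_composite_config() -> None:
    config = Pix2StructConfig.from_text_vision_configs(
        Pix2StructTextConfig(num_layers=2),
        Pix2StructVisionConfig(num_hidden_layers=2),
        initializer_range=0.01,
    )
    assert config.text_config.initializer_range == 0.01
    assert config.vision_config.initializer_range == 0.01
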
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy, and their difference (in bits)."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each two-character sequence calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
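

# A worked check of the formula used above (an illustrative addition, not part
# of the original script): Shannon entropy is H = -sum(p_i * log2(p_i)), so a
# fair two-symbol source carries exactly one bit per symbol.
def _demo_shannon_entropy() -> None:
    probs = [0.5, 0.5]
    entropy = -sum(p * math.log2(p) for p in probs)
    assert entropy == 1.0  # log2(0.5) is exactly -1.0, so this holds exactly
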
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the input text into two dicts of counts:
    single-character frequencies and two-character (bigram) frequencies.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
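

# A tiny worked example (illustrative, not from the original script): on the
# two-character input "ab", each character and the leading-space bigram are
# counted exactly once.
def _demo_analyze_text() -> None:
    single, two = analyze_text("ab")
    assert single == Counter({"a": 1, "b": 1})
    assert two == Counter({" a": 1, "ab": 1})
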
def main() -> None:
    """Run the module's doctests; uncomment below to compute entropies for a sample text."""
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()